author     Martin Sebor <msebor@redhat.com>    2019-07-09 18:32:49 +0000
committer  Martin Sebor <msebor@gcc.gnu.org>   2019-07-09 12:32:49 -0600
commit     99b1c316ec974a39bdd949f8559bb28861b69592 (patch)
tree       1de4b72ee58329bc7ebad81476075375fee586cf /gcc
parent     18c0ed4b46990c504525aa4928aab45907c8256d (diff)
PR c++/61339 - add mismatch between struct and class [-Wmismatched-tags] to non-bugs
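The warning named in the subject, -Wmismatched-tags, diagnoses declarations of the same type that do not all use the same class-key. As a minimal illustrative sketch (not code from this patch; it assumes a compiler that implements the option, e.g. g++ -Wmismatched-tags), both of the following mismatches are the kind of thing the warning flags and the kind of thing this commit removes from GCC's own sources:

  struct s;     // forward-declared with 'struct' ...

  class s       // ... but defined with 'class': the class-keys disagree,
  {             // which is what -Wmismatched-tags reports
  public:
    int i;
  };

  class c;      // the opposite mismatch is diagnosed the same way

  struct c
  {
    int i;
  };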
gcc/c/ChangeLog: PR c++/61339 * c-decl.c (xref_tag): Change class-key of PODs to struct and others to class. (field_decl_cmp): Same. * c-parser.c (c_parser_struct_or_union_specifier): Same. * c-tree.h: Same. * gimple-parser.c (c_parser_gimple_compound_statement): Same. gcc/c-family/ChangeLog: PR c++/61339 * c-opts.c (handle_deferred_opts): : Change class-key of PODs to struct and others to class. * c-pretty-print.h: Same. gcc/cp/ChangeLog: PR c++/61339 * cp-tree.h: Change class-key of PODs to struct and others to class. * search.c: Same. * semantics.c (finalize_nrv_r): Same. gcc/lto/ChangeLog: PR c++/61339 * lto-common.c (lto_splay_tree_new): : Change class-key of PODs to struct and others to class. (mentions_vars_p): Same. (register_resolution): Same. (lto_register_var_decl_in_symtab): Same. (lto_register_function_decl_in_symtab): Same. (cmp_tree): Same. (lto_read_decls): Same. gcc/ChangeLog: PR c++/61339 * auto-profile.c: Change class-key of PODs to struct and others to class. * basic-block.h: Same. * bitmap.c (bitmap_alloc): Same. * bitmap.h: Same. * builtins.c (expand_builtin_prefetch): Same. (expand_builtin_interclass_mathfn): Same. (expand_builtin_strlen): Same. (expand_builtin_mempcpy_args): Same. (expand_cmpstr): Same. (expand_builtin___clear_cache): Same. (expand_ifn_atomic_bit_test_and): Same. (expand_builtin_thread_pointer): Same. (expand_builtin_set_thread_pointer): Same. * caller-save.c (setup_save_areas): Same. (replace_reg_with_saved_mem): Same. (insert_restore): Same. (insert_save): Same. (add_used_regs): Same. * cfg.c (get_bb_copy): Same. (set_loop_copy): Same. * cfg.h: Same. * cfganal.h: Same. * cfgexpand.c (alloc_stack_frame_space): Same. (add_stack_var): Same. (add_stack_var_conflict): Same. (add_scope_conflicts_1): Same. (update_alias_info_with_stack_vars): Same. (expand_used_vars): Same. * cfghooks.c (redirect_edge_and_branch_force): Same. (delete_basic_block): Same. (split_edge): Same. (make_forwarder_block): Same. (force_nonfallthru): Same. (duplicate_block): Same. (lv_flush_pending_stmts): Same. * cfghooks.h: Same. * cfgloop.c (flow_loops_cfg_dump): Same. (flow_loop_nested_p): Same. (superloop_at_depth): Same. (get_loop_latch_edges): Same. (flow_loop_dump): Same. (flow_loops_dump): Same. (flow_loops_free): Same. (flow_loop_nodes_find): Same. (establish_preds): Same. (flow_loop_tree_node_add): Same. (flow_loop_tree_node_remove): Same. (flow_loops_find): Same. (find_subloop_latch_edge_by_profile): Same. (find_subloop_latch_edge_by_ivs): Same. (mfb_redirect_edges_in_set): Same. (form_subloop): Same. (merge_latch_edges): Same. (disambiguate_multiple_latches): Same. (disambiguate_loops_with_multiple_latches): Same. (flow_bb_inside_loop_p): Same. (glb_enum_p): Same. (get_loop_body_with_size): Same. (get_loop_body): Same. (fill_sons_in_loop): Same. (get_loop_body_in_dom_order): Same. (get_loop_body_in_custom_order): Same. (release_recorded_exits): Same. (get_loop_exit_edges): Same. (num_loop_branches): Same. (remove_bb_from_loops): Same. (find_common_loop): Same. (delete_loop): Same. (cancel_loop): Same. (verify_loop_structure): Same. (loop_preheader_edge): Same. (loop_exit_edge_p): Same. (single_exit): Same. (loop_exits_to_bb_p): Same. (loop_exits_from_bb_p): Same. (get_loop_location): Same. (record_niter_bound): Same. (get_estimated_loop_iterations_int): Same. (max_stmt_executions_int): Same. (likely_max_stmt_executions_int): Same. (get_estimated_loop_iterations): Same. (get_max_loop_iterations): Same. (get_max_loop_iterations_int): Same. 
(get_likely_max_loop_iterations): Same. * cfgloop.h (simple_loop_desc): Same. (get_loop): Same. (loop_depth): Same. (loop_outer): Same. (loop_iterator::next): Same. (loop_outermost): Same. * cfgloopanal.c (mark_irreducible_loops): Same. (num_loop_insns): Same. (average_num_loop_insns): Same. (expected_loop_iterations_unbounded): Same. (expected_loop_iterations): Same. (mark_loop_exit_edges): Same. (single_likely_exit): Same. * cfgloopmanip.c (fix_bb_placement): Same. (fix_bb_placements): Same. (remove_path): Same. (place_new_loop): Same. (add_loop): Same. (scale_loop_frequencies): Same. (scale_loop_profile): Same. (create_empty_if_region_on_edge): Same. (create_empty_loop_on_edge): Same. (loopify): Same. (unloop): Same. (fix_loop_placements): Same. (copy_loop_info): Same. (duplicate_loop): Same. (duplicate_subloops): Same. (loop_redirect_edge): Same. (can_duplicate_loop_p): Same. (duplicate_loop_to_header_edge): Same. (mfb_keep_just): Same. (has_preds_from_loop): Same. (create_preheader): Same. (create_preheaders): Same. (lv_adjust_loop_entry_edge): Same. (loop_version): Same. * cfgloopmanip.h: Same. * cgraph.h: Same. * cgraphbuild.c: Same. * combine.c (make_extraction): Same. * config/i386/i386-features.c: Same. * config/i386/i386-features.h: Same. * config/i386/i386.c (ix86_emit_outlined_ms2sysv_save): Same. (ix86_emit_outlined_ms2sysv_restore): Same. (ix86_noce_conversion_profitable_p): Same. (ix86_init_cost): Same. (ix86_simd_clone_usable): Same. * configure.ac: Same. * coretypes.h: Same. * data-streamer-in.c (string_for_index): Same. (streamer_read_indexed_string): Same. (streamer_read_string): Same. (bp_unpack_indexed_string): Same. (bp_unpack_string): Same. (streamer_read_uhwi): Same. (streamer_read_hwi): Same. (streamer_read_gcov_count): Same. (streamer_read_wide_int): Same. * data-streamer.h (streamer_write_bitpack): Same. (bp_unpack_value): Same. (streamer_write_char_stream): Same. (streamer_write_hwi_in_range): Same. (streamer_write_record_start): Same. * ddg.c (create_ddg_dep_from_intra_loop_link): Same. (add_cross_iteration_register_deps): Same. (build_intra_loop_deps): Same. * df-core.c (df_analyze): Same. (loop_post_order_compute): Same. (loop_inverted_post_order_compute): Same. * df-problems.c (df_rd_alloc): Same. (df_rd_simulate_one_insn): Same. (df_rd_local_compute): Same. (df_rd_init_solution): Same. (df_rd_confluence_n): Same. (df_rd_transfer_function): Same. (df_rd_free): Same. (df_rd_dump_defs_set): Same. (df_rd_top_dump): Same. (df_lr_alloc): Same. (df_lr_reset): Same. (df_lr_local_compute): Same. (df_lr_init): Same. (df_lr_confluence_n): Same. (df_lr_free): Same. (df_lr_top_dump): Same. (df_lr_verify_transfer_functions): Same. (df_live_alloc): Same. (df_live_reset): Same. (df_live_init): Same. (df_live_confluence_n): Same. (df_live_finalize): Same. (df_live_free): Same. (df_live_top_dump): Same. (df_live_verify_transfer_functions): Same. (df_mir_alloc): Same. (df_mir_reset): Same. (df_mir_init): Same. (df_mir_confluence_n): Same. (df_mir_free): Same. (df_mir_top_dump): Same. (df_word_lr_alloc): Same. (df_word_lr_reset): Same. (df_word_lr_init): Same. (df_word_lr_confluence_n): Same. (df_word_lr_free): Same. (df_word_lr_top_dump): Same. (df_md_alloc): Same. (df_md_simulate_one_insn): Same. (df_md_reset): Same. (df_md_init): Same. (df_md_free): Same. (df_md_top_dump): Same. * df-scan.c (df_insn_delete): Same. (df_insn_rescan): Same. (df_notes_rescan): Same. (df_sort_and_compress_mws): Same. (df_install_mws): Same. (df_refs_add_to_chains): Same. 
(df_ref_create_structure): Same. (df_ref_record): Same. (df_def_record_1): Same. (df_find_hard_reg_defs): Same. (df_uses_record): Same. (df_get_conditional_uses): Same. (df_get_call_refs): Same. (df_recompute_luids): Same. (df_get_entry_block_def_set): Same. (df_entry_block_defs_collect): Same. (df_get_exit_block_use_set): Same. (df_exit_block_uses_collect): Same. (df_mws_verify): Same. (df_bb_verify): Same. * df.h (df_scan_get_bb_info): Same. * doc/tm.texi: Same. * dse.c (record_store): Same. * dumpfile.h: Same. * emit-rtl.c (const_fixed_hasher::equal): Same. (set_mem_attributes_minus_bitpos): Same. (change_address): Same. (adjust_address_1): Same. (offset_address): Same. * emit-rtl.h: Same. * except.c (dw2_build_landing_pads): Same. (sjlj_emit_dispatch_table): Same. * explow.c (allocate_dynamic_stack_space): Same. (emit_stack_probe): Same. (probe_stack_range): Same. * expmed.c (store_bit_field_using_insv): Same. (store_bit_field_1): Same. (store_integral_bit_field): Same. (extract_bit_field_using_extv): Same. (extract_bit_field_1): Same. (emit_cstore): Same. * expr.c (emit_block_move_via_cpymem): Same. (expand_cmpstrn_or_cmpmem): Same. (set_storage_via_setmem): Same. (emit_single_push_insn_1): Same. (expand_assignment): Same. (store_constructor): Same. (expand_expr_real_2): Same. (expand_expr_real_1): Same. (try_casesi): Same. * flags.h: Same. * function.c (try_fit_stack_local): Same. (assign_stack_local_1): Same. (assign_stack_local): Same. (cut_slot_from_list): Same. (insert_slot_to_list): Same. (max_slot_level): Same. (move_slot_to_level): Same. (temp_address_hasher::equal): Same. (remove_unused_temp_slot_addresses): Same. (assign_temp): Same. (combine_temp_slots): Same. (update_temp_slot_address): Same. (preserve_temp_slots): Same. * function.h: Same. * fwprop.c: Same. * gcc-rich-location.h: Same. * gcov.c: Same. * genattrtab.c (check_attr_test): Same. (check_attr_value): Same. (convert_set_attr_alternative): Same. (convert_set_attr): Same. (check_defs): Same. (copy_boolean): Same. (get_attr_value): Same. (expand_delays): Same. (make_length_attrs): Same. (min_fn): Same. (make_alternative_compare): Same. (simplify_test_exp): Same. (tests_attr_p): Same. (get_attr_order): Same. (clear_struct_flag): Same. (gen_attr): Same. (compares_alternatives_p): Same. (gen_insn): Same. (gen_delay): Same. (find_attrs_to_cache): Same. (write_test_expr): Same. (walk_attr_value): Same. (write_attr_get): Same. (eliminate_known_true): Same. (write_insn_cases): Same. (write_attr_case): Same. (write_attr_valueq): Same. (write_attr_value): Same. (write_dummy_eligible_delay): Same. (next_comma_elt): Same. (find_attr): Same. (make_internal_attr): Same. (copy_rtx_unchanging): Same. (gen_insn_reserv): Same. (check_tune_attr): Same. (make_automaton_attrs): Same. (handle_arg): Same. * genextract.c (gen_insn): Same. (VEC_char_to_string): Same. * genmatch.c (print_operand): Same. (lower): Same. (parser::parse_operation): Same. (parser::parse_capture): Same. (parser::parse_c_expr): Same. (parser::parse_simplify): Same. (main): Same. * genoutput.c (output_operand_data): Same. (output_get_insn_name): Same. (compare_operands): Same. (place_operands): Same. (process_template): Same. (validate_insn_alternatives): Same. (validate_insn_operands): Same. (gen_expand): Same. (note_constraint): Same. * genpreds.c (write_one_predicate_function): Same. (add_constraint): Same. (process_define_register_constraint): Same. (write_lookup_constraint_1): Same. (write_lookup_constraint_array): Same. (write_insn_constraint_len): Same. 
(write_reg_class_for_constraint_1): Same. (write_constraint_satisfied_p_array): Same. * genrecog.c (optimize_subroutine_group): Same. * gensupport.c (process_define_predicate): Same. (queue_pattern): Same. (remove_from_queue): Same. (process_rtx): Same. (is_predicable): Same. (change_subst_attribute): Same. (subst_pattern_match): Same. (alter_constraints): Same. (alter_attrs_for_insn): Same. (shift_output_template): Same. (alter_output_for_subst_insn): Same. (process_one_cond_exec): Same. (subst_dup): Same. (process_define_cond_exec): Same. (mnemonic_htab_callback): Same. (gen_mnemonic_attr): Same. (read_md_rtx): Same. * ggc-page.c: Same. * gimple-loop-interchange.cc (dump_reduction): Same. (dump_induction): Same. (loop_cand::~loop_cand): Same. (free_data_refs_with_aux): Same. (tree_loop_interchange::interchange_loops): Same. (tree_loop_interchange::map_inductions_to_loop): Same. (tree_loop_interchange::move_code_to_inner_loop): Same. (compute_access_stride): Same. (compute_access_strides): Same. (proper_loop_form_for_interchange): Same. (tree_loop_interchange_compute_ddrs): Same. (prune_datarefs_not_in_loop): Same. (prepare_data_references): Same. (pass_linterchange::execute): Same. * gimple-loop-jam.c (bb_prevents_fusion_p): Same. (unroll_jam_possible_p): Same. (fuse_loops): Same. (adjust_unroll_factor): Same. (tree_loop_unroll_and_jam): Same. * gimple-loop-versioning.cc (loop_versioning::~loop_versioning): Same. (loop_versioning::expensive_stmt_p): Same. (loop_versioning::version_for_unity): Same. (loop_versioning::dump_inner_likelihood): Same. (loop_versioning::find_per_loop_multiplication): Same. (loop_versioning::analyze_term_using_scevs): Same. (loop_versioning::record_address_fragment): Same. (loop_versioning::analyze_expr): Same. (loop_versioning::analyze_blocks): Same. (loop_versioning::prune_conditions): Same. (loop_versioning::merge_loop_info): Same. (loop_versioning::add_loop_to_queue): Same. (loop_versioning::decide_whether_loop_is_versionable): Same. (loop_versioning::make_versioning_decisions): Same. (loop_versioning::implement_versioning_decisions): Same. * gimple-ssa-evrp-analyze.c (evrp_range_analyzer::record_ranges_from_phis): Same. * gimple-ssa-store-merging.c (split_store::split_store): Same. (count_multiple_uses): Same. (split_group): Same. (imm_store_chain_info::output_merged_store): Same. (pass_store_merging::process_store): Same. * gimple-ssa-strength-reduction.c (slsr_process_phi): Same. * gimple-ssa-warn-alloca.c (adjusted_warn_limit): Same. (is_max): Same. (alloca_call_type): Same. (pass_walloca::execute): Same. * gimple-streamer-in.c (input_phi): Same. (input_gimple_stmt): Same. * gimple-streamer.h: Same. * godump.c (go_force_record_alignment): Same. (go_format_type): Same. (go_output_type): Same. (go_output_fndecl): Same. (go_output_typedef): Same. (keyword_hash_init): Same. (find_dummy_types): Same. * graph.c (draw_cfg_nodes_no_loops): Same. (draw_cfg_nodes_for_loop): Same. * hard-reg-set.h (hard_reg_set_iter_next): Same. * hsa-brig.c: Same. * hsa-common.h (hsa_internal_fn_hasher::equal): Same. * hsa-dump.c (dump_hsa_cfun): Same. * hsa-gen.c (gen_function_def_parameters): Same. * hsa-regalloc.c (dump_hsa_cfun_regalloc): Same. * input.c (dump_line_table_statistics): Same. (test_lexer): Same. * input.h: Same. * internal-fn.c (get_multi_vector_move): Same. (expand_load_lanes_optab_fn): Same. (expand_GOMP_SIMT_ENTER_ALLOC): Same. (expand_GOMP_SIMT_EXIT): Same. (expand_GOMP_SIMT_LAST_LANE): Same. (expand_GOMP_SIMT_ORDERED_PRED): Same. 
(expand_GOMP_SIMT_VOTE_ANY): Same. (expand_GOMP_SIMT_XCHG_BFLY): Same. (expand_GOMP_SIMT_XCHG_IDX): Same. (expand_addsub_overflow): Same. (expand_neg_overflow): Same. (expand_mul_overflow): Same. (expand_call_mem_ref): Same. (expand_mask_load_optab_fn): Same. (expand_scatter_store_optab_fn): Same. (expand_gather_load_optab_fn): Same. * ipa-cp.c (ipa_get_parm_lattices): Same. (print_all_lattices): Same. (ignore_edge_p): Same. (build_toporder_info): Same. (free_toporder_info): Same. (push_node_to_stack): Same. (ipcp_lattice<valtype>::set_contains_variable): Same. (set_agg_lats_to_bottom): Same. (ipcp_bits_lattice::meet_with): Same. (set_single_call_flag): Same. (initialize_node_lattices): Same. (ipa_get_jf_ancestor_result): Same. (ipcp_verify_propagated_values): Same. (propagate_scalar_across_jump_function): Same. (propagate_context_across_jump_function): Same. (propagate_bits_across_jump_function): Same. (ipa_vr_operation_and_type_effects): Same. (propagate_vr_across_jump_function): Same. (set_check_aggs_by_ref): Same. (set_chain_of_aglats_contains_variable): Same. (merge_aggregate_lattices): Same. (agg_pass_through_permissible_p): Same. (propagate_aggs_across_jump_function): Same. (call_passes_through_thunk_p): Same. (propagate_constants_across_call): Same. (devirtualization_time_bonus): Same. (good_cloning_opportunity_p): Same. (context_independent_aggregate_values): Same. (gather_context_independent_values): Same. (perform_estimation_of_a_value): Same. (estimate_local_effects): Same. (value_topo_info<valtype>::add_val): Same. (add_all_node_vals_to_toposort): Same. (value_topo_info<valtype>::propagate_effects): Same. (ipcp_propagate_stage): Same. (ipcp_discover_new_direct_edges): Same. (same_node_or_its_all_contexts_clone_p): Same. (cgraph_edge_brings_value_p): Same. (gather_edges_for_value): Same. (create_specialized_node): Same. (find_more_scalar_values_for_callers_subset): Same. (find_more_contexts_for_caller_subset): Same. (copy_plats_to_inter): Same. (intersect_aggregates_with_edge): Same. (find_aggregate_values_for_callers_subset): Same. (cgraph_edge_brings_all_agg_vals_for_node): Same. (decide_about_value): Same. (decide_whether_version_node): Same. (spread_undeadness): Same. (identify_dead_nodes): Same. (ipcp_store_vr_results): Same. * ipa-devirt.c (final_warning_record::grow_type_warnings): Same. * ipa-fnsummary.c (ipa_fn_summary::account_size_time): Same. (redirect_to_unreachable): Same. (edge_set_predicate): Same. (evaluate_conditions_for_known_args): Same. (evaluate_properties_for_edge): Same. (ipa_fn_summary_t::duplicate): Same. (ipa_call_summary_t::duplicate): Same. (dump_ipa_call_summary): Same. (ipa_dump_fn_summary): Same. (eliminated_by_inlining_prob): Same. (set_cond_stmt_execution_predicate): Same. (set_switch_stmt_execution_predicate): Same. (compute_bb_predicates): Same. (will_be_nonconstant_expr_predicate): Same. (phi_result_unknown_predicate): Same. (analyze_function_body): Same. (compute_fn_summary): Same. (estimate_edge_devirt_benefit): Same. (estimate_edge_size_and_time): Same. (estimate_calls_size_and_time): Same. (estimate_node_size_and_time): Same. (remap_edge_change_prob): Same. (remap_edge_summaries): Same. (ipa_merge_fn_summary_after_inlining): Same. (ipa_fn_summary_generate): Same. (inline_read_section): Same. (ipa_fn_summary_read): Same. (ipa_fn_summary_write): Same. * ipa-fnsummary.h: Same. * ipa-hsa.c (ipa_hsa_read_section): Same. * ipa-icf-gimple.c (func_checker::compare_loops): Same. * ipa-icf.c (sem_function::param_used_p): Same. 
* ipa-inline-analysis.c (do_estimate_edge_time): Same. * ipa-inline.c (edge_badness): Same. (inline_small_functions): Same. * ipa-polymorphic-call.c (ipa_polymorphic_call_context::stream_out): Same. * ipa-predicate.c (predicate::remap_after_duplication): Same. (predicate::remap_after_inlining): Same. (predicate::stream_out): Same. * ipa-predicate.h: Same. * ipa-profile.c (ipa_profile_read_summary): Same. * ipa-prop.c (ipa_get_param_decl_index_1): Same. (count_formal_params): Same. (ipa_dump_param): Same. (ipa_alloc_node_params): Same. (ipa_print_node_jump_functions_for_edge): Same. (ipa_print_node_jump_functions): Same. (ipa_load_from_parm_agg): Same. (get_ancestor_addr_info): Same. (ipa_compute_jump_functions_for_edge): Same. (ipa_analyze_virtual_call_uses): Same. (ipa_analyze_stmt_uses): Same. (ipa_analyze_params_uses_in_bb): Same. (update_jump_functions_after_inlining): Same. (try_decrement_rdesc_refcount): Same. (ipa_impossible_devirt_target): Same. (update_indirect_edges_after_inlining): Same. (combine_controlled_uses_counters): Same. (ipa_edge_args_sum_t::duplicate): Same. (ipa_write_jump_function): Same. (ipa_write_indirect_edge_info): Same. (ipa_write_node_info): Same. (ipa_read_edge_info): Same. (ipa_prop_read_section): Same. (read_replacements_section): Same. * ipa-prop.h (ipa_get_param_count): Same. (ipa_get_param): Same. (ipa_get_type): Same. (ipa_get_param_move_cost): Same. (ipa_set_param_used): Same. (ipa_get_controlled_uses): Same. (ipa_set_controlled_uses): Same. (ipa_get_cs_argument_count): Same. * ipa-pure-const.c (analyze_function): Same. (pure_const_read_summary): Same. * ipa-ref.h: Same. * ipa-reference.c (ipa_reference_read_optimization_summary): Same. * ipa-split.c (test_nonssa_use): Same. (dump_split_point): Same. (dominated_by_forbidden): Same. (split_part_set_ssa_name_p): Same. (find_split_points): Same. * ira-build.c (finish_loop_tree_nodes): Same. (low_pressure_loop_node_p): Same. * ira-color.c (ira_reuse_stack_slot): Same. * ira-int.h: Same. * ira.c (setup_reg_equiv): Same. (print_insn_chain): Same. (ira): Same. * loop-doloop.c (doloop_condition_get): Same. (add_test): Same. (record_reg_sets): Same. (doloop_optimize): Same. * loop-init.c (loop_optimizer_init): Same. (fix_loop_structure): Same. * loop-invariant.c (merge_identical_invariants): Same. (compute_always_reached): Same. (find_exits): Same. (may_assign_reg_p): Same. (find_invariants_bb): Same. (find_invariants_body): Same. (replace_uses): Same. (can_move_invariant_reg): Same. (free_inv_motion_data): Same. (move_single_loop_invariants): Same. (change_pressure): Same. (mark_ref_regs): Same. (calculate_loop_reg_pressure): Same. * loop-iv.c (biv_entry_hasher::equal): Same. (iv_extend_to_rtx_code): Same. (check_iv_ref_table_size): Same. (clear_iv_info): Same. (latch_dominating_def): Same. (iv_get_reaching_def): Same. (iv_constant): Same. (iv_subreg): Same. (iv_extend): Same. (iv_neg): Same. (iv_add): Same. (iv_mult): Same. (get_biv_step): Same. (record_iv): Same. (analyzed_for_bivness_p): Same. (record_biv): Same. (iv_analyze_biv): Same. (iv_analyze_expr): Same. (iv_analyze_def): Same. (iv_analyze_op): Same. (iv_analyze): Same. (iv_analyze_result): Same. (biv_p): Same. (eliminate_implied_conditions): Same. (simplify_using_initial_values): Same. (shorten_into_mode): Same. (canonicalize_iv_subregs): Same. (determine_max_iter): Same. (check_simple_exit): Same. (find_simple_exit): Same. (get_simple_loop_desc): Same. * loop-unroll.c (report_unroll): Same. (decide_unrolling): Same. (unroll_loops): Same. 
(loop_exit_at_end_p): Same. (decide_unroll_constant_iterations): Same. (unroll_loop_constant_iterations): Same. (compare_and_jump_seq): Same. (unroll_loop_runtime_iterations): Same. (decide_unroll_stupid): Same. (unroll_loop_stupid): Same. (referenced_in_one_insn_in_loop_p): Same. (reset_debug_uses_in_loop): Same. (analyze_iv_to_split_insn): Same. * lra-eliminations.c (lra_debug_elim_table): Same. (setup_can_eliminate): Same. (form_sum): Same. (lra_get_elimination_hard_regno): Same. (lra_eliminate_regs_1): Same. (eliminate_regs_in_insn): Same. (update_reg_eliminate): Same. (init_elimination): Same. (lra_eliminate): Same. * lra-int.h: Same. * lra-lives.c (initiate_live_solver): Same. * lra-remat.c (create_remat_bb_data): Same. * lra-spills.c (lra_spill): Same. * lra.c (lra_set_insn_recog_data): Same. (lra_set_used_insn_alternative_by_uid): Same. (init_reg_info): Same. (expand_reg_info): Same. * lto-cgraph.c (output_symtab): Same. (read_identifier): Same. (get_alias_symbol): Same. (input_node): Same. (input_varpool_node): Same. (input_ref): Same. (input_edge): Same. (input_cgraph_1): Same. (input_refs): Same. (input_symtab): Same. (input_offload_tables): Same. (output_cgraph_opt_summary): Same. (input_edge_opt_summary): Same. (input_cgraph_opt_section): Same. * lto-section-in.c (lto_free_raw_section_data): Same. (lto_create_simple_input_block): Same. (lto_free_function_in_decl_state_for_node): Same. * lto-streamer-in.c (lto_tag_check_set): Same. (lto_location_cache::revert_location_cache): Same. (lto_location_cache::input_location): Same. (lto_input_location): Same. (stream_input_location_now): Same. (lto_input_tree_ref): Same. (lto_input_eh_catch_list): Same. (input_eh_region): Same. (lto_init_eh): Same. (make_new_block): Same. (input_cfg): Same. (fixup_call_stmt_edges): Same. (input_struct_function_base): Same. (input_function): Same. (lto_read_body_or_constructor): Same. (lto_read_tree_1): Same. (lto_read_tree): Same. (lto_input_scc): Same. (lto_input_tree_1): Same. (lto_input_toplevel_asms): Same. (lto_input_mode_table): Same. (lto_reader_init): Same. (lto_data_in_create): Same. * lto-streamer-out.c (output_cfg): Same. * lto-streamer.h: Same. * modulo-sched.c (duplicate_insns_of_cycles): Same. (generate_prolog_epilog): Same. (mark_loop_unsched): Same. (dump_insn_location): Same. (loop_canon_p): Same. (sms_schedule): Same. * omp-expand.c (expand_omp_for_ordered_loops): Same. (expand_omp_for_generic): Same. (expand_omp_for_static_nochunk): Same. (expand_omp_for_static_chunk): Same. (expand_omp_simd): Same. (expand_omp_taskloop_for_inner): Same. (expand_oacc_for): Same. (expand_omp_atomic_pipeline): Same. (mark_loops_in_oacc_kernels_region): Same. * omp-offload.c (oacc_xform_loop): Same. * omp-simd-clone.c (simd_clone_adjust): Same. * optabs-query.c (get_traditional_extraction_insn): Same. * optabs.c (expand_vector_broadcast): Same. (expand_binop_directly): Same. (expand_twoval_unop): Same. (expand_twoval_binop): Same. (expand_unop_direct): Same. (emit_indirect_jump): Same. (emit_conditional_move): Same. (emit_conditional_neg_or_complement): Same. (emit_conditional_add): Same. (vector_compare_rtx): Same. (expand_vec_perm_1): Same. (expand_vec_perm_const): Same. (expand_vec_cond_expr): Same. (expand_vec_series_expr): Same. (maybe_emit_atomic_exchange): Same. (maybe_emit_sync_lock_test_and_set): Same. (expand_atomic_compare_and_swap): Same. (expand_atomic_load): Same. (expand_atomic_store): Same. (maybe_emit_op): Same. (valid_multiword_target_p): Same. (create_integer_operand): Same. 
(maybe_legitimize_operand_same_code): Same. (maybe_legitimize_operand): Same. (create_convert_operand_from_type): Same. (can_reuse_operands_p): Same. (maybe_legitimize_operands): Same. (maybe_gen_insn): Same. (maybe_expand_insn): Same. (maybe_expand_jump_insn): Same. (expand_insn): Same. * optabs.h (create_expand_operand): Same. (create_fixed_operand): Same. (create_output_operand): Same. (create_input_operand): Same. (create_convert_operand_to): Same. (create_convert_operand_from): Same. * optinfo.h: Same. * poly-int.h: Same. * predict.c (optimize_insn_for_speed_p): Same. (optimize_loop_for_size_p): Same. (optimize_loop_for_speed_p): Same. (optimize_loop_nest_for_speed_p): Same. (get_base_value): Same. (predicted_by_loop_heuristics_p): Same. (predict_extra_loop_exits): Same. (predict_loops): Same. (predict_paths_for_bb): Same. (predict_paths_leading_to): Same. (propagate_freq): Same. (pass_profile::execute): Same. * predict.h: Same. * profile-count.c (profile_count::differs_from_p): Same. (profile_probability::differs_lot_from_p): Same. * profile-count.h: Same. * profile.c (branch_prob): Same. * regrename.c (free_chain_data): Same. (mark_conflict): Same. (create_new_chain): Same. (merge_overlapping_regs): Same. (init_rename_info): Same. (merge_chains): Same. (regrename_analyze): Same. (regrename_do_replace): Same. (scan_rtx_reg): Same. (record_out_operands): Same. (build_def_use): Same. * regrename.h: Same. * reload.h: Same. * reload1.c (init_reload): Same. (maybe_fix_stack_asms): Same. (copy_reloads): Same. (count_pseudo): Same. (count_spilled_pseudo): Same. (find_reg): Same. (find_reload_regs): Same. (select_reload_regs): Same. (spill_hard_reg): Same. (fixup_eh_region_note): Same. (set_reload_reg): Same. (allocate_reload_reg): Same. (compute_reload_subreg_offset): Same. (reload_adjust_reg_for_icode): Same. (emit_input_reload_insns): Same. (emit_output_reload_insns): Same. (do_input_reload): Same. (inherit_piecemeal_p): Same. * rtl.h: Same. * sanopt.c (maybe_get_dominating_check): Same. (maybe_optimize_ubsan_ptr_ifn): Same. (can_remove_asan_check): Same. (maybe_optimize_asan_check_ifn): Same. (sanopt_optimize_walker): Same. * sched-deps.c (add_dependence_list): Same. (chain_to_prev_insn): Same. (add_insn_mem_dependence): Same. (create_insn_reg_set): Same. (maybe_extend_reg_info_p): Same. (sched_analyze_reg): Same. (sched_analyze_1): Same. (get_implicit_reg_pending_clobbers): Same. (chain_to_prev_insn_p): Same. (deps_analyze_insn): Same. (deps_start_bb): Same. (sched_free_deps): Same. (init_deps): Same. (init_deps_reg_last): Same. (free_deps): Same. * sched-ebb.c: Same. * sched-int.h: Same. * sched-rgn.c (add_branch_dependences): Same. (concat_insn_mem_list): Same. (deps_join): Same. (sched_rgn_compute_dependencies): Same. * sel-sched-ir.c (reset_target_context): Same. (copy_deps_context): Same. (init_id_from_df): Same. (has_dependence_p): Same. (change_loops_latches): Same. (bb_top_order_comparator): Same. (make_region_from_loop_preheader): Same. (sel_init_pipelining): Same. (get_loop_nest_for_rgn): Same. (make_regions_from_the_rest): Same. (sel_is_loop_preheader_p): Same. * sel-sched-ir.h (inner_loop_header_p): Same. (get_all_loop_exits): Same. * selftest.h: Same. * sese.c (sese_build_liveouts): Same. (sese_insert_phis_for_liveouts): Same. * sese.h (defined_in_sese_p): Same. * sreal.c (sreal::stream_out): Same. * sreal.h: Same. * streamer-hooks.h: Same. * target-globals.c (save_target_globals): Same. * target-globals.h: Same. * target.def: Same. * target.h: Same. 
* targhooks.c (default_has_ifunc_p): Same. (default_empty_mask_is_expensive): Same. (default_init_cost): Same. * targhooks.h: Same. * toplev.c: Same. * tree-affine.c (aff_combination_mult): Same. (aff_combination_expand): Same. (aff_combination_constant_multiple_p): Same. * tree-affine.h: Same. * tree-cfg.c (build_gimple_cfg): Same. (replace_loop_annotate_in_block): Same. (replace_uses_by): Same. (remove_bb): Same. (dump_cfg_stats): Same. (gimple_duplicate_sese_region): Same. (gimple_duplicate_sese_tail): Same. (move_block_to_fn): Same. (replace_block_vars_by_duplicates): Same. (move_sese_region_to_fn): Same. (print_loops_bb): Same. (print_loop): Same. (print_loops): Same. (debug): Same. (debug_loops): Same. * tree-cfg.h: Same. * tree-chrec.c (chrec_fold_plus_poly_poly): Same. (chrec_fold_multiply_poly_poly): Same. (chrec_evaluate): Same. (chrec_component_in_loop_num): Same. (reset_evolution_in_loop): Same. (is_multivariate_chrec): Same. (chrec_contains_symbols): Same. (nb_vars_in_chrec): Same. (chrec_convert_1): Same. (chrec_convert_aggressive): Same. * tree-chrec.h: Same. * tree-core.h: Same. * tree-data-ref.c (dump_data_dependence_relation): Same. (canonicalize_base_object_address): Same. (data_ref_compare_tree): Same. (prune_runtime_alias_test_list): Same. (get_segment_min_max): Same. (create_intersect_range_checks): Same. (conflict_fn_no_dependence): Same. (object_address_invariant_in_loop_p): Same. (analyze_ziv_subscript): Same. (analyze_siv_subscript_cst_affine): Same. (analyze_miv_subscript): Same. (analyze_overlapping_iterations): Same. (build_classic_dist_vector_1): Same. (add_other_self_distances): Same. (same_access_functions): Same. (build_classic_dir_vector): Same. (subscript_dependence_tester_1): Same. (subscript_dependence_tester): Same. (access_functions_are_affine_or_constant_p): Same. (get_references_in_stmt): Same. (loop_nest_has_data_refs): Same. (graphite_find_data_references_in_stmt): Same. (find_data_references_in_bb): Same. (get_base_for_alignment): Same. (find_loop_nest_1): Same. (find_loop_nest): Same. * tree-data-ref.h (dr_alignment): Same. (ddr_dependence_level): Same. * tree-if-conv.c (fold_build_cond_expr): Same. (add_to_predicate_list): Same. (add_to_dst_predicate_list): Same. (phi_convertible_by_degenerating_args): Same. (idx_within_array_bound): Same. (all_preds_critical_p): Same. (pred_blocks_visited_p): Same. (predicate_bbs): Same. (build_region): Same. (if_convertible_loop_p_1): Same. (is_cond_scalar_reduction): Same. (predicate_scalar_phi): Same. (remove_conditions_and_labels): Same. (combine_blocks): Same. (version_loop_for_if_conversion): Same. (versionable_outer_loop_p): Same. (ifcvt_local_dce): Same. (tree_if_conversion): Same. (pass_if_conversion::gate): Same. * tree-if-conv.h: Same. * tree-inline.c (maybe_move_debug_stmts_to_successors): Same. * tree-loop-distribution.c (bb_top_order_cmp): Same. (free_rdg): Same. (stmt_has_scalar_dependences_outside_loop): Same. (copy_loop_before): Same. (create_bb_after_loop): Same. (const_with_all_bytes_same): Same. (generate_memset_builtin): Same. (generate_memcpy_builtin): Same. (destroy_loop): Same. (build_rdg_partition_for_vertex): Same. (compute_access_range): Same. (data_ref_segment_size): Same. (latch_dominated_by_data_ref): Same. (compute_alias_check_pairs): Same. (fuse_memset_builtins): Same. (finalize_partitions): Same. (find_seed_stmts_for_distribution): Same. (prepare_perfect_loop_nest): Same. * tree-parloops.c (lambda_transform_legal_p): Same. (loop_parallel_p): Same. (reduc_stmt_res): Same. 
(add_field_for_name): Same. (create_call_for_reduction_1): Same. (replace_uses_in_bb_by): Same. (transform_to_exit_first_loop_alt): Same. (try_transform_to_exit_first_loop_alt): Same. (transform_to_exit_first_loop): Same. (num_phis): Same. (gen_parallel_loop): Same. (gather_scalar_reductions): Same. (get_omp_data_i_param): Same. (try_create_reduction_list): Same. (oacc_entry_exit_single_gang): Same. (parallelize_loops): Same. * tree-pass.h: Same. * tree-predcom.c (determine_offset): Same. (last_always_executed_block): Same. (split_data_refs_to_components): Same. (suitable_component_p): Same. (valid_initializer_p): Same. (find_looparound_phi): Same. (insert_looparound_copy): Same. (add_looparound_copies): Same. (determine_roots_comp): Same. (predcom_tmp_var): Same. (initialize_root_vars): Same. (initialize_root_vars_store_elim_1): Same. (initialize_root_vars_store_elim_2): Same. (finalize_eliminated_stores): Same. (initialize_root_vars_lm): Same. (remove_stmt): Same. (determine_unroll_factor): Same. (execute_pred_commoning_cbck): Same. (base_names_in_chain_on): Same. (combine_chains): Same. (pcom_stmt_dominates_stmt_p): Same. (try_combine_chains): Same. (prepare_initializers_chain_store_elim): Same. (prepare_initializers_chain): Same. (prepare_initializers): Same. (prepare_finalizers_chain): Same. (prepare_finalizers): Same. (insert_init_seqs): Same. * tree-scalar-evolution.c (loop_phi_node_p): Same. (compute_overall_effect_of_inner_loop): Same. (add_to_evolution_1): Same. (add_to_evolution): Same. (follow_ssa_edge_binary): Same. (follow_ssa_edge_expr): Same. (backedge_phi_arg_p): Same. (follow_ssa_edge_in_condition_phi_branch): Same. (follow_ssa_edge_in_condition_phi): Same. (follow_ssa_edge_inner_loop_phi): Same. (follow_ssa_edge): Same. (analyze_evolution_in_loop): Same. (analyze_initial_condition): Same. (interpret_loop_phi): Same. (interpret_condition_phi): Same. (interpret_rhs_expr): Same. (interpret_expr): Same. (interpret_gimple_assign): Same. (analyze_scalar_evolution_1): Same. (analyze_scalar_evolution): Same. (analyze_scalar_evolution_for_address_of): Same. (get_instantiated_value_entry): Same. (loop_closed_phi_def): Same. (instantiate_scev_name): Same. (instantiate_scev_poly): Same. (instantiate_scev_binary): Same. (instantiate_scev_convert): Same. (instantiate_scev_not): Same. (instantiate_scev_r): Same. (instantiate_scev): Same. (resolve_mixers): Same. (initialize_scalar_evolutions_analyzer): Same. (scev_reset_htab): Same. (scev_reset): Same. (derive_simple_iv_with_niters): Same. (simple_iv_with_niters): Same. (expression_expensive_p): Same. (final_value_replacement_loop): Same. * tree-scalar-evolution.h (block_before_loop): Same. * tree-ssa-address.h: Same. * tree-ssa-dce.c (find_obviously_necessary_stmts): Same. * tree-ssa-dom.c (edge_info::record_simple_equiv): Same. (record_edge_info): Same. * tree-ssa-live.c (var_map_base_fini): Same. (remove_unused_locals): Same. * tree-ssa-live.h: Same. * tree-ssa-loop-ch.c (should_duplicate_loop_header_p): Same. (pass_ch_vect::execute): Same. (pass_ch::process_loop_p): Same. * tree-ssa-loop-im.c (mem_ref_hasher::hash): Same. (movement_possibility): Same. (outermost_invariant_loop): Same. (stmt_cost): Same. (determine_max_movement): Same. (invariantness_dom_walker::before_dom_children): Same. (move_computations): Same. (may_move_till): Same. (force_move_till_op): Same. (force_move_till): Same. (memref_free): Same. (record_mem_ref_loc): Same. (set_ref_stored_in_loop): Same. (mark_ref_stored): Same. (sort_bbs_in_loop_postorder_cmp): Same. 
(sort_locs_in_loop_postorder_cmp): Same. (analyze_memory_references): Same. (mem_refs_may_alias_p): Same. (find_ref_loc_in_loop_cmp): Same. (rewrite_mem_ref_loc::operator): Same. (first_mem_ref_loc_1::operator): Same. (sm_set_flag_if_changed::operator): Same. (execute_sm_if_changed_flag_set): Same. (execute_sm): Same. (hoist_memory_references): Same. (ref_always_accessed::operator): Same. (refs_independent_p): Same. (record_dep_loop): Same. (ref_indep_loop_p_1): Same. (ref_indep_loop_p): Same. (can_sm_ref_p): Same. (find_refs_for_sm): Same. (loop_suitable_for_sm): Same. (store_motion_loop): Same. (store_motion): Same. (fill_always_executed_in): Same. * tree-ssa-loop-ivcanon.c (constant_after_peeling): Same. (estimated_unrolled_size): Same. (loop_edge_to_cancel): Same. (remove_exits_and_undefined_stmts): Same. (remove_redundant_iv_tests): Same. (unloop_loops): Same. (estimated_peeled_sequence_size): Same. (try_peel_loop): Same. (canonicalize_loop_induction_variables): Same. (canonicalize_induction_variables): Same. * tree-ssa-loop-ivopts.c (iv_inv_expr_hasher::equal): Same. (name_info): Same. (stmt_after_inc_pos): Same. (contains_abnormal_ssa_name_p): Same. (niter_for_exit): Same. (find_bivs): Same. (mark_bivs): Same. (find_givs_in_bb): Same. (find_induction_variables): Same. (find_interesting_uses_cond): Same. (outermost_invariant_loop_for_expr): Same. (idx_find_step): Same. (add_candidate_1): Same. (add_iv_candidate_derived_from_uses): Same. (alloc_use_cost_map): Same. (prepare_decl_rtl): Same. (generic_predict_doloop_p): Same. (computation_cost): Same. (determine_common_wider_type): Same. (get_computation_aff_1): Same. (get_use_type): Same. (determine_group_iv_cost_address): Same. (iv_period): Same. (difference_cannot_overflow_p): Same. (may_eliminate_iv): Same. (determine_set_costs): Same. (cheaper_cost_pair): Same. (compare_cost_pair): Same. (iv_ca_cand_for_group): Same. (iv_ca_recount_cost): Same. (iv_ca_set_remove_invs): Same. (iv_ca_set_no_cp): Same. (iv_ca_set_add_invs): Same. (iv_ca_set_cp): Same. (iv_ca_add_group): Same. (iv_ca_cost): Same. (iv_ca_compare_deps): Same. (iv_ca_delta_reverse): Same. (iv_ca_delta_commit): Same. (iv_ca_cand_used_p): Same. (iv_ca_delta_free): Same. (iv_ca_new): Same. (iv_ca_free): Same. (iv_ca_dump): Same. (iv_ca_extend): Same. (iv_ca_narrow): Same. (iv_ca_prune): Same. (cheaper_cost_with_cand): Same. (iv_ca_replace): Same. (try_add_cand_for): Same. (get_initial_solution): Same. (try_improve_iv_set): Same. (find_optimal_iv_set_1): Same. (create_new_iv): Same. (rewrite_use_compare): Same. (remove_unused_ivs): Same. (determine_scaling_factor): Same. * tree-ssa-loop-ivopts.h: Same. * tree-ssa-loop-manip.c (create_iv): Same. (compute_live_loop_exits): Same. (add_exit_phi): Same. (add_exit_phis): Same. (find_uses_to_rename_use): Same. (find_uses_to_rename_def): Same. (find_uses_to_rename_in_loop): Same. (rewrite_into_loop_closed_ssa): Same. (check_loop_closed_ssa_bb): Same. (split_loop_exit_edge): Same. (ip_end_pos): Same. (ip_normal_pos): Same. (copy_phi_node_args): Same. (gimple_duplicate_loop_to_header_edge): Same. (can_unroll_loop_p): Same. (determine_exit_conditions): Same. (scale_dominated_blocks_in_loop): Same. (niter_for_unrolled_loop): Same. (tree_transform_and_unroll_loop): Same. (rewrite_all_phi_nodes_with_iv): Same. * tree-ssa-loop-manip.h: Same. * tree-ssa-loop-niter.c (number_of_iterations_ne_max): Same. (number_of_iterations_ne): Same. (assert_no_overflow_lt): Same. (assert_loop_rolls_lt): Same. (number_of_iterations_lt): Same. 
(adjust_cond_for_loop_until_wrap): Same. (tree_simplify_using_condition): Same. (simplify_using_initial_conditions): Same. (simplify_using_outer_evolutions): Same. (loop_only_exit_p): Same. (ssa_defined_by_minus_one_stmt_p): Same. (number_of_iterations_popcount): Same. (number_of_iterations_exit): Same. (find_loop_niter): Same. (finite_loop_p): Same. (chain_of_csts_start): Same. (get_val_for): Same. (loop_niter_by_eval): Same. (derive_constant_upper_bound_ops): Same. (do_warn_aggressive_loop_optimizations): Same. (record_estimate): Same. (get_cst_init_from_scev): Same. (record_nonwrapping_iv): Same. (idx_infer_loop_bounds): Same. (infer_loop_bounds_from_ref): Same. (infer_loop_bounds_from_array): Same. (infer_loop_bounds_from_pointer_arith): Same. (infer_loop_bounds_from_signedness): Same. (bound_index): Same. (discover_iteration_bound_by_body_walk): Same. (maybe_lower_iteration_bound): Same. (estimate_numbers_of_iterations): Same. (estimated_loop_iterations): Same. (estimated_loop_iterations_int): Same. (max_loop_iterations): Same. (max_loop_iterations_int): Same. (likely_max_loop_iterations): Same. (likely_max_loop_iterations_int): Same. (estimated_stmt_executions_int): Same. (max_stmt_executions): Same. (likely_max_stmt_executions): Same. (estimated_stmt_executions): Same. (stmt_dominates_stmt_p): Same. (nowrap_type_p): Same. (loop_exits_before_overflow): Same. (scev_var_range_cant_overflow): Same. (scev_probably_wraps_p): Same. (free_numbers_of_iterations_estimates): Same. * tree-ssa-loop-niter.h: Same. * tree-ssa-loop-prefetch.c (release_mem_refs): Same. (idx_analyze_ref): Same. (analyze_ref): Same. (gather_memory_references_ref): Same. (mark_nontemporal_store): Same. (emit_mfence_after_loop): Same. (may_use_storent_in_loop_p): Same. (mark_nontemporal_stores): Same. (should_unroll_loop_p): Same. (volume_of_dist_vector): Same. (add_subscript_strides): Same. (self_reuse_distance): Same. (insn_to_prefetch_ratio_too_small_p): Same. * tree-ssa-loop-split.c (split_at_bb_p): Same. (patch_loop_exit): Same. (find_or_create_guard_phi): Same. (easy_exit_values): Same. (connect_loop_phis): Same. (connect_loops): Same. (compute_new_first_bound): Same. (split_loop): Same. (tree_ssa_split_loops): Same. * tree-ssa-loop-unswitch.c (tree_ssa_unswitch_loops): Same. (is_maybe_undefined): Same. (tree_may_unswitch_on): Same. (simplify_using_entry_checks): Same. (tree_unswitch_single_loop): Same. (tree_unswitch_loop): Same. (tree_unswitch_outer_loop): Same. (empty_bb_without_guard_p): Same. (used_outside_loop_p): Same. (get_vop_from_header): Same. (hoist_guard): Same. * tree-ssa-loop.c (gate_oacc_kernels): Same. (get_lsm_tmp_name): Same. * tree-ssa-loop.h: Same. * tree-ssa-reassoc.c (add_repeat_to_ops_vec): Same. (build_and_add_sum): Same. (no_side_effect_bb): Same. (get_ops): Same. (linearize_expr): Same. (should_break_up_subtract): Same. (linearize_expr_tree): Same. * tree-ssa-scopedtables.c: Same. * tree-ssa-scopedtables.h: Same. * tree-ssa-structalias.c (condense_visit): Same. (label_visit): Same. (dump_pred_graph): Same. (perform_var_substitution): Same. (move_complex_constraints): Same. (remove_preds_and_fake_succs): Same. * tree-ssa-threadupdate.c (dbds_continue_enumeration_p): Same. (determine_bb_domination_status): Same. (duplicate_thread_path): Same. (thread_through_all_blocks): Same. * tree-ssa-threadupdate.h: Same. * tree-streamer-in.c (streamer_read_string_cst): Same. (input_identifier): Same. (unpack_ts_type_common_value_fields): Same. (unpack_ts_block_value_fields): Same. 
(unpack_ts_translation_unit_decl_value_fields): Same. (unpack_ts_omp_clause_value_fields): Same. (streamer_read_tree_bitfields): Same. (streamer_alloc_tree): Same. (lto_input_ts_common_tree_pointers): Same. (lto_input_ts_vector_tree_pointers): Same. (lto_input_ts_poly_tree_pointers): Same. (lto_input_ts_complex_tree_pointers): Same. (lto_input_ts_decl_minimal_tree_pointers): Same. (lto_input_ts_decl_common_tree_pointers): Same. (lto_input_ts_decl_non_common_tree_pointers): Same. (lto_input_ts_decl_with_vis_tree_pointers): Same. (lto_input_ts_field_decl_tree_pointers): Same. (lto_input_ts_function_decl_tree_pointers): Same. (lto_input_ts_type_common_tree_pointers): Same. (lto_input_ts_type_non_common_tree_pointers): Same. (lto_input_ts_list_tree_pointers): Same. (lto_input_ts_vec_tree_pointers): Same. (lto_input_ts_exp_tree_pointers): Same. (lto_input_ts_block_tree_pointers): Same. (lto_input_ts_binfo_tree_pointers): Same. (lto_input_ts_constructor_tree_pointers): Same. (lto_input_ts_omp_clause_tree_pointers): Same. (streamer_read_tree_body): Same. * tree-streamer.h: Same. * tree-switch-conversion.c (bit_test_cluster::is_beneficial): Same. * tree-vect-data-refs.c (vect_get_smallest_scalar_type): Same. (vect_analyze_possibly_independent_ddr): Same. (vect_analyze_data_ref_dependence): Same. (vect_compute_data_ref_alignment): Same. (vect_enhance_data_refs_alignment): Same. (vect_analyze_data_ref_access): Same. (vect_check_gather_scatter): Same. (vect_find_stmt_data_reference): Same. (vect_create_addr_base_for_vector_ref): Same. (vect_setup_realignment): Same. (vect_supportable_dr_alignment): Same. * tree-vect-loop-manip.c (rename_variables_in_bb): Same. (adjust_phi_and_debug_stmts): Same. (vect_set_loop_mask): Same. (add_preheader_seq): Same. (vect_maybe_permute_loop_masks): Same. (vect_set_loop_masks_directly): Same. (vect_set_loop_condition_masked): Same. (vect_set_loop_condition_unmasked): Same. (slpeel_duplicate_current_defs_from_edges): Same. (slpeel_add_loop_guard): Same. (slpeel_can_duplicate_loop_p): Same. (create_lcssa_for_virtual_phi): Same. (iv_phi_p): Same. (vect_update_ivs_after_vectorizer): Same. (vect_gen_vector_loop_niters_mult_vf): Same. (slpeel_update_phi_nodes_for_loops): Same. (slpeel_update_phi_nodes_for_guard1): Same. (find_guard_arg): Same. (slpeel_update_phi_nodes_for_guard2): Same. (slpeel_update_phi_nodes_for_lcssa): Same. (vect_do_peeling): Same. (vect_create_cond_for_alias_checks): Same. (vect_loop_versioning): Same. * tree-vect-loop.c (vect_determine_vf_for_stmt): Same. (vect_inner_phi_in_double_reduction_p): Same. (vect_analyze_scalar_cycles_1): Same. (vect_fixup_scalar_cycles_with_patterns): Same. (vect_get_loop_niters): Same. (bb_in_loop_p): Same. (vect_get_max_nscalars_per_iter): Same. (vect_verify_full_masking): Same. (vect_compute_single_scalar_iteration_cost): Same. (vect_analyze_loop_form_1): Same. (vect_analyze_loop_form): Same. (vect_active_double_reduction_p): Same. (vect_analyze_loop_operations): Same. (neutral_op_for_slp_reduction): Same. (vect_is_simple_reduction): Same. (vect_model_reduction_cost): Same. (get_initial_def_for_reduction): Same. (get_initial_defs_for_reduction): Same. (vect_create_epilog_for_reduction): Same. (vectorize_fold_left_reduction): Same. (vectorizable_reduction): Same. (vectorizable_induction): Same. (vectorizable_live_operation): Same. (loop_niters_no_overflow): Same. (vect_get_loop_mask): Same. (vect_transform_loop_stmt): Same. (vect_transform_loop): Same. * tree-vect-patterns.c (vect_reassociating_reduction_p): Same. 
(vect_determine_precisions): Same. (vect_pattern_recog_1): Same. * tree-vect-slp.c (vect_analyze_slp_instance): Same. * tree-vect-stmts.c (stmt_vectype): Same. (process_use): Same. (vect_init_vector_1): Same. (vect_truncate_gather_scatter_offset): Same. (get_group_load_store_type): Same. (vect_build_gather_load_calls): Same. (vect_get_strided_load_store_ops): Same. (vectorizable_simd_clone_call): Same. (vectorizable_store): Same. (permute_vec_elements): Same. (vectorizable_load): Same. (vect_transform_stmt): Same. (supportable_widening_operation): Same. * tree-vectorizer.c (vec_info::replace_stmt): Same. (vec_info::free_stmt_vec_info): Same. (vect_free_loop_info_assumptions): Same. (vect_loop_vectorized_call): Same. (set_uid_loop_bbs): Same. (vectorize_loops): Same. * tree-vectorizer.h (STMT_VINFO_BB_VINFO): Same. * tree.c (add_tree_to_fld_list): Same. (fld_type_variant_equal_p): Same. (fld_decl_context): Same. (fld_incomplete_type_of): Same. (free_lang_data_in_binfo): Same. (need_assembler_name_p): Same. (find_decls_types_r): Same. (get_eh_types_for_runtime): Same. (find_decls_types_in_eh_region): Same. (find_decls_types_in_node): Same. (assign_assembler_name_if_needed): Same. * value-prof.c (stream_out_histogram_value): Same. * value-prof.h: Same. * var-tracking.c (use_narrower_mode): Same. (prepare_call_arguments): Same. (vt_expand_loc_callback): Same. (resolve_expansions_pending_recursion): Same. (vt_expand_loc): Same. * varasm.c (const_hash_1): Same. (compare_constant): Same. (tree_output_constant_def): Same. (simplify_subtraction): Same. (get_pool_constant): Same. (output_constant_pool_2): Same. (output_constant_pool_1): Same. (mark_constants_in_pattern): Same. (mark_constant_pool): Same. (get_section_anchor): Same. * vr-values.c (compare_range_with_value): Same. (vr_values::extract_range_from_phi_node): Same. * vr-values.h: Same. * web.c (unionfind_union): Same. * wide-int.h: Same. From-SVN: r273311
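The ChangeLog entries above all apply one mechanical cleanup: every declaration of a given type in GCC's sources now uses a single class-key, struct for PODs and class for everything else, presumably so that GCC itself stays clean once -Wmismatched-tags is enabled. A hypothetical before/after sketch of that convention (the type names are made up, not types touched by the patch):

  /* A POD keeps 'struct' at every declaration; before the cleanup a
     forward declaration might have said 'class pod_example;'.  */
  struct pod_example;                  /* was: class pod_example;  */
  struct pod_example { int x; };

  /* A type with access control and member functions keeps 'class'
     everywhere; before, a stray 'struct nontrivial_example;' would
     have triggered the warning.  */
  class nontrivial_example;            /* was: struct nontrivial_example;  */
  class nontrivial_example
  {
  public:
    nontrivial_example () : x (0) {}
  private:
    int x;
  };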
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog1431
-rw-r--r--gcc/auto-profile.c2
-rw-r--r--gcc/basic-block.h2
-rw-r--r--gcc/bitmap.c2
-rw-r--r--gcc/bitmap.h2
-rw-r--r--gcc/builtins.c18
-rw-r--r--gcc/c-family/ChangeLog7
-rw-r--r--gcc/c-family/c-opts.c2
-rw-r--r--gcc/c-family/c-pretty-print.h2
-rw-r--r--gcc/c/ChangeLog10
-rw-r--r--gcc/c/c-decl.c6
-rw-r--r--gcc/c/c-parser.c2
-rw-r--r--gcc/c/c-tree.h6
-rw-r--r--gcc/c/gimple-parser.c4
-rw-r--r--gcc/caller-save.c24
-rw-r--r--gcc/cfg.c6
-rw-r--r--gcc/cfg.h4
-rw-r--r--gcc/cfganal.h4
-rw-r--r--gcc/cfgexpand.c22
-rw-r--r--gcc/cfghooks.c18
-rw-r--r--gcc/cfghooks.h6
-rw-r--r--gcc/cfgloop.c134
-rw-r--r--gcc/cfgloop.h146
-rw-r--r--gcc/cfgloopanal.c20
-rw-r--r--gcc/cfgloopmanip.c90
-rw-r--r--gcc/cfgloopmanip.h32
-rw-r--r--gcc/cgraph.h15
-rw-r--r--gcc/cgraphbuild.c2
-rw-r--r--gcc/combine.c2
-rw-r--r--gcc/config/i386/i386-features.c2
-rw-r--r--gcc/config/i386/i386-features.h2
-rw-r--r--gcc/config/i386/i386.c10
-rw-r--r--gcc/configure.ac3
-rw-r--r--gcc/coretypes.h46
-rw-r--r--gcc/cp/ChangeLog7
-rw-r--r--gcc/cp/search.c2
-rw-r--r--gcc/cp/semantics.c4
-rw-r--r--gcc/data-streamer-in.c22
-rw-r--r--gcc/data-streamer.h30
-rw-r--r--gcc/ddg.c8
-rw-r--r--gcc/df-core.c10
-rw-r--r--gcc/df-problems.c156
-rw-r--r--gcc/df-scan.c60
-rw-r--r--gcc/df.h30
-rw-r--r--gcc/doc/tm.texi10
-rw-r--r--gcc/dse.c8
-rw-r--r--gcc/dumpfile.h2
-rw-r--r--gcc/emit-rtl.c10
-rw-r--r--gcc/emit-rtl.h10
-rw-r--r--gcc/except.c6
-rw-r--r--gcc/explow.c6
-rw-r--r--gcc/expmed.c14
-rw-r--r--gcc/expr.c22
-rw-r--r--gcc/flags.h4
-rw-r--r--gcc/function.c40
-rw-r--r--gcc/function.h4
-rw-r--r--gcc/fwprop.c4
-rw-r--r--gcc/gcc-rich-location.h2
-rw-r--r--gcc/gcov.c14
-rw-r--r--gcc/genattrtab.c136
-rw-r--r--gcc/genextract.c8
-rw-r--r--gcc/genmatch.c30
-rw-r--r--gcc/genoutput.c46
-rw-r--r--gcc/genpreds.c30
-rw-r--r--gcc/genrecog.c6
-rw-r--r--gcc/gensupport.c110
-rw-r--r--gcc/ggc-page.c2
-rw-r--r--gcc/gimple-loop-interchange.cc62
-rw-r--r--gcc/gimple-loop-jam.c20
-rw-r--r--gcc/gimple-loop-versioning.cc64
-rw-r--r--gcc/gimple-ssa-evrp-analyze.c2
-rw-r--r--gcc/gimple-ssa-store-merging.c20
-rw-r--r--gcc/gimple-ssa-strength-reduction.c8
-rw-r--r--gcc/gimple-ssa-warn-alloca.c8
-rw-r--r--gcc/gimple-streamer-in.c8
-rw-r--r--gcc/gimple-streamer.h2
-rw-r--r--gcc/godump.c16
-rw-r--r--gcc/graph.c4
-rw-r--r--gcc/hard-reg-set.h4
-rw-r--r--gcc/hsa-brig.c5
-rw-r--r--gcc/hsa-common.h10
-rw-r--r--gcc/hsa-dump.c2
-rw-r--r--gcc/hsa-gen.c4
-rw-r--r--gcc/hsa-regalloc.c2
-rw-r--r--gcc/input.c8
-rw-r--r--gcc/input.h4
-rw-r--r--gcc/internal-fn.c34
-rw-r--r--gcc/ipa-cp.c172
-rw-r--r--gcc/ipa-devirt.c2
-rw-r--r--gcc/ipa-fnsummary.c114
-rw-r--r--gcc/ipa-fnsummary.h2
-rw-r--r--gcc/ipa-hsa.c2
-rw-r--r--gcc/ipa-icf-gimple.c4
-rw-r--r--gcc/ipa-icf.c2
-rw-r--r--gcc/ipa-inline-analysis.c6
-rw-r--r--gcc/ipa-inline.c4
-rw-r--r--gcc/ipa-polymorphic-call.c4
-rw-r--r--gcc/ipa-predicate.c8
-rw-r--r--gcc/ipa-predicate.h8
-rw-r--r--gcc/ipa-profile.c2
-rw-r--r--gcc/ipa-prop.c104
-rw-r--r--gcc/ipa-prop.h42
-rw-r--r--gcc/ipa-pure-const.c8
-rw-r--r--gcc/ipa-ref.h4
-rw-r--r--gcc/ipa-reference.c2
-rw-r--r--gcc/ipa-split.c14
-rw-r--r--gcc/ira-build.c8
-rw-r--r--gcc/ira-color.c4
-rw-r--r--gcc/ira-int.h8
-rw-r--r--gcc/ira.c22
-rw-r--r--gcc/loop-doloop.c10
-rw-r--r--gcc/loop-init.c6
-rw-r--r--gcc/loop-invariant.c42
-rw-r--r--gcc/loop-iv.c100
-rw-r--r--gcc/loop-unroll.c62
-rw-r--r--gcc/lra-eliminations.c36
-rw-r--r--gcc/lra-int.h4
-rw-r--r--gcc/lra-lives.c4
-rw-r--r--gcc/lra-remat.c4
-rw-r--r--gcc/lra-spills.c4
-rw-r--r--gcc/lra.c8
-rw-r--r--gcc/lto-cgraph.c30
-rw-r--r--gcc/lto-section-in.c6
-rw-r--r--gcc/lto-streamer-in.c58
-rw-r--r--gcc/lto-streamer-out.c2
-rw-r--r--gcc/lto-streamer.h30
-rw-r--r--gcc/lto/ChangeLog12
-rw-r--r--gcc/lto/lto-common.c14
-rw-r--r--gcc/modulo-sched.c14
-rw-r--r--gcc/omp-expand.c40
-rw-r--r--gcc/omp-offload.c4
-rw-r--r--gcc/omp-simd-clone.c2
-rw-r--r--gcc/optabs-query.c2
-rw-r--r--gcc/optabs.c78
-rw-r--r--gcc/optabs.h30
-rw-r--r--gcc/optinfo.h2
-rw-r--r--gcc/poly-int.h2
-rw-r--r--gcc/predict.c38
-rw-r--r--gcc/predict.h8
-rw-r--r--gcc/profile-count.c4
-rw-r--r--gcc/profile-count.h8
-rw-r--r--gcc/profile.c2
-rw-r--r--gcc/regrename.c70
-rw-r--r--gcc/regrename.h8
-rw-r--r--gcc/reload.h8
-rw-r--r--gcc/reload1.c76
-rw-r--r--gcc/rtl.h8
-rw-r--r--gcc/sanopt.c10
-rw-r--r--gcc/sched-deps.c44
-rw-r--r--gcc/sched-ebb.c2
-rw-r--r--gcc/sched-int.h18
-rw-r--r--gcc/sched-rgn.c12
-rw-r--r--gcc/sel-sched-ir.c28
-rw-r--r--gcc/sel-sched-ir.h16
-rw-r--r--gcc/selftest.h2
-rw-r--r--gcc/sese.c6
-rw-r--r--gcc/sese.h6
-rw-r--r--gcc/sreal.c2
-rw-r--r--gcc/sreal.h4
-rw-r--r--gcc/streamer-hooks.h8
-rw-r--r--gcc/target-globals.c14
-rw-r--r--gcc/target-globals.h16
-rw-r--r--gcc/target.def10
-rw-r--r--gcc/target.h10
-rw-r--r--gcc/targhooks.c6
-rw-r--r--gcc/targhooks.h8
-rw-r--r--gcc/toplev.c4
-rw-r--r--gcc/tree-affine.c8
-rw-r--r--gcc/tree-affine.h4
-rw-r--r--gcc/tree-cfg.c52
-rw-r--r--gcc/tree-cfg.h10
-rw-r--r--gcc/tree-chrec.c26
-rw-r--r--gcc/tree-chrec.h4
-rw-r--r--gcc/tree-core.h4
-rw-r--r--gcc/tree-data-ref.c56
-rw-r--r--gcc/tree-data-ref.h20
-rw-r--r--gcc/tree-if-conv.c50
-rw-r--r--gcc/tree-if-conv.h2
-rw-r--r--gcc/tree-inline.c6
-rw-r--r--gcc/tree-loop-distribution.c48
-rw-r--r--gcc/tree-parloops.c42
-rw-r--r--gcc/tree-pass.h2
-rw-r--r--gcc/tree-predcom.c64
-rw-r--r--gcc/tree-scalar-evolution.c102
-rw-r--r--gcc/tree-scalar-evolution.h24
-rw-r--r--gcc/tree-ssa-address.h4
-rw-r--r--gcc/tree-ssa-dce.c2
-rw-r--r--gcc/tree-ssa-dom.c6
-rw-r--r--gcc/tree-ssa-live.c4
-rw-r--r--gcc/tree-ssa-live.h2
-rw-r--r--gcc/tree-ssa-loop-ch.c16
-rw-r--r--gcc/tree-ssa-loop-im.c126
-rw-r--r--gcc/tree-ssa-loop-ivcanon.c36
-rw-r--r--gcc/tree-ssa-loop-ivopts.c208
-rw-r--r--gcc/tree-ssa-loop-ivopts.h8
-rw-r--r--gcc/tree-ssa-loop-manip.c58
-rw-r--r--gcc/tree-ssa-loop-manip.h36
-rw-r--r--gcc/tree-ssa-loop-niter.c146
-rw-r--r--gcc/tree-ssa-loop-niter.h52
-rw-r--r--gcc/tree-ssa-loop-prefetch.c36
-rw-r--r--gcc/tree-ssa-loop-split.c24
-rw-r--r--gcc/tree-ssa-loop-unswitch.c50
-rw-r--r--gcc/tree-ssa-loop.c6
-rw-r--r--gcc/tree-ssa-loop.h4
-rw-r--r--gcc/tree-ssa-reassoc.c16
-rw-r--r--gcc/tree-ssa-scopedtables.c4
-rw-r--r--gcc/tree-ssa-scopedtables.h2
-rw-r--r--gcc/tree-ssa-structalias.c16
-rw-r--r--gcc/tree-ssa-threadupdate.c8
-rw-r--r--gcc/tree-ssa-threadupdate.h2
-rw-r--r--gcc/tree-streamer-in.c98
-rw-r--r--gcc/tree-streamer.h16
-rw-r--r--gcc/tree-switch-conversion.c6
-rw-r--r--gcc/tree-vect-data-refs.c36
-rw-r--r--gcc/tree-vect-loop-manip.c72
-rw-r--r--gcc/tree-vect-loop.c86
-rw-r--r--gcc/tree-vect-patterns.c6
-rw-r--r--gcc/tree-vect-slp.c2
-rw-r--r--gcc/tree-vect-stmts.c40
-rw-r--r--gcc/tree-vectorizer.c16
-rw-r--r--gcc/tree-vectorizer.h52
-rw-r--r--gcc/tree.c30
-rw-r--r--gcc/value-prof.c2
-rw-r--r--gcc/value-prof.h2
-rw-r--r--gcc/var-tracking.c16
-rw-r--r--gcc/varasm.c34
-rw-r--r--gcc/vr-values.c4
-rw-r--r--gcc/vr-values.h2
-rw-r--r--gcc/web.c2
-rw-r--r--gcc/wide-int.h2
230 files changed, 4109 insertions, 2643 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index dc34222f4d9..3872b6dbb8a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -8,6 +8,1437 @@
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
+ * auto-profile.c: Change class-key of PODs to struct and others
+ to class.
+ * basic-block.h: Same.
+ * bitmap.c (bitmap_alloc): Same.
+ * bitmap.h: Same.
+ * builtins.c (expand_builtin_prefetch): Same.
+ (expand_builtin_interclass_mathfn): Same.
+ (expand_builtin_strlen): Same.
+ (expand_builtin_mempcpy_args): Same.
+ (expand_cmpstr): Same.
+ (expand_builtin___clear_cache): Same.
+ (expand_ifn_atomic_bit_test_and): Same.
+ (expand_builtin_thread_pointer): Same.
+ (expand_builtin_set_thread_pointer): Same.
+ * caller-save.c (setup_save_areas): Same.
+ (replace_reg_with_saved_mem): Same.
+ (insert_restore): Same.
+ (insert_save): Same.
+ (add_used_regs): Same.
+ * cfg.c (get_bb_copy): Same.
+ (set_loop_copy): Same.
+ * cfg.h: Same.
+ * cfganal.h: Same.
+ * cfgexpand.c (alloc_stack_frame_space): Same.
+ (add_stack_var): Same.
+ (add_stack_var_conflict): Same.
+ (add_scope_conflicts_1): Same.
+ (update_alias_info_with_stack_vars): Same.
+ (expand_used_vars): Same.
+ * cfghooks.c (redirect_edge_and_branch_force): Same.
+ (delete_basic_block): Same.
+ (split_edge): Same.
+ (make_forwarder_block): Same.
+ (force_nonfallthru): Same.
+ (duplicate_block): Same.
+ (lv_flush_pending_stmts): Same.
+ * cfghooks.h: Same.
+ * cfgloop.c (flow_loops_cfg_dump): Same.
+ (flow_loop_nested_p): Same.
+ (superloop_at_depth): Same.
+ (get_loop_latch_edges): Same.
+ (flow_loop_dump): Same.
+ (flow_loops_dump): Same.
+ (flow_loops_free): Same.
+ (flow_loop_nodes_find): Same.
+ (establish_preds): Same.
+ (flow_loop_tree_node_add): Same.
+ (flow_loop_tree_node_remove): Same.
+ (flow_loops_find): Same.
+ (find_subloop_latch_edge_by_profile): Same.
+ (find_subloop_latch_edge_by_ivs): Same.
+ (mfb_redirect_edges_in_set): Same.
+ (form_subloop): Same.
+ (merge_latch_edges): Same.
+ (disambiguate_multiple_latches): Same.
+ (disambiguate_loops_with_multiple_latches): Same.
+ (flow_bb_inside_loop_p): Same.
+ (glb_enum_p): Same.
+ (get_loop_body_with_size): Same.
+ (get_loop_body): Same.
+ (fill_sons_in_loop): Same.
+ (get_loop_body_in_dom_order): Same.
+ (get_loop_body_in_custom_order): Same.
+ (release_recorded_exits): Same.
+ (get_loop_exit_edges): Same.
+ (num_loop_branches): Same.
+ (remove_bb_from_loops): Same.
+ (find_common_loop): Same.
+ (delete_loop): Same.
+ (cancel_loop): Same.
+ (verify_loop_structure): Same.
+ (loop_preheader_edge): Same.
+ (loop_exit_edge_p): Same.
+ (single_exit): Same.
+ (loop_exits_to_bb_p): Same.
+ (loop_exits_from_bb_p): Same.
+ (get_loop_location): Same.
+ (record_niter_bound): Same.
+ (get_estimated_loop_iterations_int): Same.
+ (max_stmt_executions_int): Same.
+ (likely_max_stmt_executions_int): Same.
+ (get_estimated_loop_iterations): Same.
+ (get_max_loop_iterations): Same.
+ (get_max_loop_iterations_int): Same.
+ (get_likely_max_loop_iterations): Same.
+ * cfgloop.h (simple_loop_desc): Same.
+ (get_loop): Same.
+ (loop_depth): Same.
+ (loop_outer): Same.
+ (loop_iterator::next): Same.
+ (loop_outermost): Same.
+ * cfgloopanal.c (mark_irreducible_loops): Same.
+ (num_loop_insns): Same.
+ (average_num_loop_insns): Same.
+ (expected_loop_iterations_unbounded): Same.
+ (expected_loop_iterations): Same.
+ (mark_loop_exit_edges): Same.
+ (single_likely_exit): Same.
+ * cfgloopmanip.c (fix_bb_placement): Same.
+ (fix_bb_placements): Same.
+ (remove_path): Same.
+ (place_new_loop): Same.
+ (add_loop): Same.
+ (scale_loop_frequencies): Same.
+ (scale_loop_profile): Same.
+ (create_empty_if_region_on_edge): Same.
+ (create_empty_loop_on_edge): Same.
+ (loopify): Same.
+ (unloop): Same.
+ (fix_loop_placements): Same.
+ (copy_loop_info): Same.
+ (duplicate_loop): Same.
+ (duplicate_subloops): Same.
+ (loop_redirect_edge): Same.
+ (can_duplicate_loop_p): Same.
+ (duplicate_loop_to_header_edge): Same.
+ (mfb_keep_just): Same.
+ (has_preds_from_loop): Same.
+ (create_preheader): Same.
+ (create_preheaders): Same.
+ (lv_adjust_loop_entry_edge): Same.
+ (loop_version): Same.
+ * cfgloopmanip.h: Same.
+ * cgraph.h: Same.
+ * cgraphbuild.c: Same.
+ * combine.c (make_extraction): Same.
+ * config/i386/i386-features.c: Same.
+ * config/i386/i386-features.h: Same.
+ * config/i386/i386.c (ix86_emit_outlined_ms2sysv_save): Same.
+ (ix86_emit_outlined_ms2sysv_restore): Same.
+ (ix86_noce_conversion_profitable_p): Same.
+ (ix86_init_cost): Same.
+ (ix86_simd_clone_usable): Same.
+ * configure.ac: Same.
+ * coretypes.h: Same.
+ * data-streamer-in.c (string_for_index): Same.
+ (streamer_read_indexed_string): Same.
+ (streamer_read_string): Same.
+ (bp_unpack_indexed_string): Same.
+ (bp_unpack_string): Same.
+ (streamer_read_uhwi): Same.
+ (streamer_read_hwi): Same.
+ (streamer_read_gcov_count): Same.
+ (streamer_read_wide_int): Same.
+ * data-streamer.h (streamer_write_bitpack): Same.
+ (bp_unpack_value): Same.
+ (streamer_write_char_stream): Same.
+ (streamer_write_hwi_in_range): Same.
+ (streamer_write_record_start): Same.
+ * ddg.c (create_ddg_dep_from_intra_loop_link): Same.
+ (add_cross_iteration_register_deps): Same.
+ (build_intra_loop_deps): Same.
+ * df-core.c (df_analyze): Same.
+ (loop_post_order_compute): Same.
+ (loop_inverted_post_order_compute): Same.
+ * df-problems.c (df_rd_alloc): Same.
+ (df_rd_simulate_one_insn): Same.
+ (df_rd_local_compute): Same.
+ (df_rd_init_solution): Same.
+ (df_rd_confluence_n): Same.
+ (df_rd_transfer_function): Same.
+ (df_rd_free): Same.
+ (df_rd_dump_defs_set): Same.
+ (df_rd_top_dump): Same.
+ (df_lr_alloc): Same.
+ (df_lr_reset): Same.
+ (df_lr_local_compute): Same.
+ (df_lr_init): Same.
+ (df_lr_confluence_n): Same.
+ (df_lr_free): Same.
+ (df_lr_top_dump): Same.
+ (df_lr_verify_transfer_functions): Same.
+ (df_live_alloc): Same.
+ (df_live_reset): Same.
+ (df_live_init): Same.
+ (df_live_confluence_n): Same.
+ (df_live_finalize): Same.
+ (df_live_free): Same.
+ (df_live_top_dump): Same.
+ (df_live_verify_transfer_functions): Same.
+ (df_mir_alloc): Same.
+ (df_mir_reset): Same.
+ (df_mir_init): Same.
+ (df_mir_confluence_n): Same.
+ (df_mir_free): Same.
+ (df_mir_top_dump): Same.
+ (df_word_lr_alloc): Same.
+ (df_word_lr_reset): Same.
+ (df_word_lr_init): Same.
+ (df_word_lr_confluence_n): Same.
+ (df_word_lr_free): Same.
+ (df_word_lr_top_dump): Same.
+ (df_md_alloc): Same.
+ (df_md_simulate_one_insn): Same.
+ (df_md_reset): Same.
+ (df_md_init): Same.
+ (df_md_free): Same.
+ (df_md_top_dump): Same.
+ * df-scan.c (df_insn_delete): Same.
+ (df_insn_rescan): Same.
+ (df_notes_rescan): Same.
+ (df_sort_and_compress_mws): Same.
+ (df_install_mws): Same.
+ (df_refs_add_to_chains): Same.
+ (df_ref_create_structure): Same.
+ (df_ref_record): Same.
+ (df_def_record_1): Same.
+ (df_find_hard_reg_defs): Same.
+ (df_uses_record): Same.
+ (df_get_conditional_uses): Same.
+ (df_get_call_refs): Same.
+ (df_recompute_luids): Same.
+ (df_get_entry_block_def_set): Same.
+ (df_entry_block_defs_collect): Same.
+ (df_get_exit_block_use_set): Same.
+ (df_exit_block_uses_collect): Same.
+ (df_mws_verify): Same.
+ (df_bb_verify): Same.
+ * df.h (df_scan_get_bb_info): Same.
+ * doc/tm.texi: Same.
+ * dse.c (record_store): Same.
+ * dumpfile.h: Same.
+ * emit-rtl.c (const_fixed_hasher::equal): Same.
+ (set_mem_attributes_minus_bitpos): Same.
+ (change_address): Same.
+ (adjust_address_1): Same.
+ (offset_address): Same.
+ * emit-rtl.h: Same.
+ * except.c (dw2_build_landing_pads): Same.
+ (sjlj_emit_dispatch_table): Same.
+ * explow.c (allocate_dynamic_stack_space): Same.
+ (emit_stack_probe): Same.
+ (probe_stack_range): Same.
+ * expmed.c (store_bit_field_using_insv): Same.
+ (store_bit_field_1): Same.
+ (store_integral_bit_field): Same.
+ (extract_bit_field_using_extv): Same.
+ (extract_bit_field_1): Same.
+ (emit_cstore): Same.
+ * expr.c (emit_block_move_via_cpymem): Same.
+ (expand_cmpstrn_or_cmpmem): Same.
+ (set_storage_via_setmem): Same.
+ (emit_single_push_insn_1): Same.
+ (expand_assignment): Same.
+ (store_constructor): Same.
+ (expand_expr_real_2): Same.
+ (expand_expr_real_1): Same.
+ (try_casesi): Same.
+ * flags.h: Same.
+ * function.c (try_fit_stack_local): Same.
+ (assign_stack_local_1): Same.
+ (assign_stack_local): Same.
+ (cut_slot_from_list): Same.
+ (insert_slot_to_list): Same.
+ (max_slot_level): Same.
+ (move_slot_to_level): Same.
+ (temp_address_hasher::equal): Same.
+ (remove_unused_temp_slot_addresses): Same.
+ (assign_temp): Same.
+ (combine_temp_slots): Same.
+ (update_temp_slot_address): Same.
+ (preserve_temp_slots): Same.
+ * function.h: Same.
+ * fwprop.c: Same.
+ * gcc-rich-location.h: Same.
+ * gcov.c: Same.
+ * genattrtab.c (check_attr_test): Same.
+ (check_attr_value): Same.
+ (convert_set_attr_alternative): Same.
+ (convert_set_attr): Same.
+ (check_defs): Same.
+ (copy_boolean): Same.
+ (get_attr_value): Same.
+ (expand_delays): Same.
+ (make_length_attrs): Same.
+ (min_fn): Same.
+ (make_alternative_compare): Same.
+ (simplify_test_exp): Same.
+ (tests_attr_p): Same.
+ (get_attr_order): Same.
+ (clear_struct_flag): Same.
+ (gen_attr): Same.
+ (compares_alternatives_p): Same.
+ (gen_insn): Same.
+ (gen_delay): Same.
+ (find_attrs_to_cache): Same.
+ (write_test_expr): Same.
+ (walk_attr_value): Same.
+ (write_attr_get): Same.
+ (eliminate_known_true): Same.
+ (write_insn_cases): Same.
+ (write_attr_case): Same.
+ (write_attr_valueq): Same.
+ (write_attr_value): Same.
+ (write_dummy_eligible_delay): Same.
+ (next_comma_elt): Same.
+ (find_attr): Same.
+ (make_internal_attr): Same.
+ (copy_rtx_unchanging): Same.
+ (gen_insn_reserv): Same.
+ (check_tune_attr): Same.
+ (make_automaton_attrs): Same.
+ (handle_arg): Same.
+ * genextract.c (gen_insn): Same.
+ (VEC_char_to_string): Same.
+ * genmatch.c (print_operand): Same.
+ (lower): Same.
+ (parser::parse_operation): Same.
+ (parser::parse_capture): Same.
+ (parser::parse_c_expr): Same.
+ (parser::parse_simplify): Same.
+ (main): Same.
+ * genoutput.c (output_operand_data): Same.
+ (output_get_insn_name): Same.
+ (compare_operands): Same.
+ (place_operands): Same.
+ (process_template): Same.
+ (validate_insn_alternatives): Same.
+ (validate_insn_operands): Same.
+ (gen_expand): Same.
+ (note_constraint): Same.
+ * genpreds.c (write_one_predicate_function): Same.
+ (add_constraint): Same.
+ (process_define_register_constraint): Same.
+ (write_lookup_constraint_1): Same.
+ (write_lookup_constraint_array): Same.
+ (write_insn_constraint_len): Same.
+ (write_reg_class_for_constraint_1): Same.
+ (write_constraint_satisfied_p_array): Same.
+ * genrecog.c (optimize_subroutine_group): Same.
+ * gensupport.c (process_define_predicate): Same.
+ (queue_pattern): Same.
+ (remove_from_queue): Same.
+ (process_rtx): Same.
+ (is_predicable): Same.
+ (change_subst_attribute): Same.
+ (subst_pattern_match): Same.
+ (alter_constraints): Same.
+ (alter_attrs_for_insn): Same.
+ (shift_output_template): Same.
+ (alter_output_for_subst_insn): Same.
+ (process_one_cond_exec): Same.
+ (subst_dup): Same.
+ (process_define_cond_exec): Same.
+ (mnemonic_htab_callback): Same.
+ (gen_mnemonic_attr): Same.
+ (read_md_rtx): Same.
+ * ggc-page.c: Same.
+ * gimple-loop-interchange.cc (dump_reduction): Same.
+ (dump_induction): Same.
+ (loop_cand::~loop_cand): Same.
+ (free_data_refs_with_aux): Same.
+ (tree_loop_interchange::interchange_loops): Same.
+ (tree_loop_interchange::map_inductions_to_loop): Same.
+ (tree_loop_interchange::move_code_to_inner_loop): Same.
+ (compute_access_stride): Same.
+ (compute_access_strides): Same.
+ (proper_loop_form_for_interchange): Same.
+ (tree_loop_interchange_compute_ddrs): Same.
+ (prune_datarefs_not_in_loop): Same.
+ (prepare_data_references): Same.
+ (pass_linterchange::execute): Same.
+ * gimple-loop-jam.c (bb_prevents_fusion_p): Same.
+ (unroll_jam_possible_p): Same.
+ (fuse_loops): Same.
+ (adjust_unroll_factor): Same.
+ (tree_loop_unroll_and_jam): Same.
+ * gimple-loop-versioning.cc (loop_versioning::~loop_versioning): Same.
+ (loop_versioning::expensive_stmt_p): Same.
+ (loop_versioning::version_for_unity): Same.
+ (loop_versioning::dump_inner_likelihood): Same.
+ (loop_versioning::find_per_loop_multiplication): Same.
+ (loop_versioning::analyze_term_using_scevs): Same.
+ (loop_versioning::record_address_fragment): Same.
+ (loop_versioning::analyze_expr): Same.
+ (loop_versioning::analyze_blocks): Same.
+ (loop_versioning::prune_conditions): Same.
+ (loop_versioning::merge_loop_info): Same.
+ (loop_versioning::add_loop_to_queue): Same.
+ (loop_versioning::decide_whether_loop_is_versionable): Same.
+ (loop_versioning::make_versioning_decisions): Same.
+ (loop_versioning::implement_versioning_decisions): Same.
+ * gimple-ssa-evrp-analyze.c
+ (evrp_range_analyzer::record_ranges_from_phis): Same.
+ * gimple-ssa-store-merging.c (split_store::split_store): Same.
+ (count_multiple_uses): Same.
+ (split_group): Same.
+ (imm_store_chain_info::output_merged_store): Same.
+ (pass_store_merging::process_store): Same.
+ * gimple-ssa-strength-reduction.c (slsr_process_phi): Same.
+ * gimple-ssa-warn-alloca.c (adjusted_warn_limit): Same.
+ (is_max): Same.
+ (alloca_call_type): Same.
+ (pass_walloca::execute): Same.
+ * gimple-streamer-in.c (input_phi): Same.
+ (input_gimple_stmt): Same.
+ * gimple-streamer.h: Same.
+ * godump.c (go_force_record_alignment): Same.
+ (go_format_type): Same.
+ (go_output_type): Same.
+ (go_output_fndecl): Same.
+ (go_output_typedef): Same.
+ (keyword_hash_init): Same.
+ (find_dummy_types): Same.
+ * graph.c (draw_cfg_nodes_no_loops): Same.
+ (draw_cfg_nodes_for_loop): Same.
+ * hard-reg-set.h (hard_reg_set_iter_next): Same.
+ * hsa-brig.c: Same.
+ * hsa-common.h (hsa_internal_fn_hasher::equal): Same.
+ * hsa-dump.c (dump_hsa_cfun): Same.
+ * hsa-gen.c (gen_function_def_parameters): Same.
+ * hsa-regalloc.c (dump_hsa_cfun_regalloc): Same.
+ * input.c (dump_line_table_statistics): Same.
+ (test_lexer): Same.
+ * input.h: Same.
+ * internal-fn.c (get_multi_vector_move): Same.
+ (expand_load_lanes_optab_fn): Same.
+ (expand_GOMP_SIMT_ENTER_ALLOC): Same.
+ (expand_GOMP_SIMT_EXIT): Same.
+ (expand_GOMP_SIMT_LAST_LANE): Same.
+ (expand_GOMP_SIMT_ORDERED_PRED): Same.
+ (expand_GOMP_SIMT_VOTE_ANY): Same.
+ (expand_GOMP_SIMT_XCHG_BFLY): Same.
+ (expand_GOMP_SIMT_XCHG_IDX): Same.
+ (expand_addsub_overflow): Same.
+ (expand_neg_overflow): Same.
+ (expand_mul_overflow): Same.
+ (expand_call_mem_ref): Same.
+ (expand_mask_load_optab_fn): Same.
+ (expand_scatter_store_optab_fn): Same.
+ (expand_gather_load_optab_fn): Same.
+ * ipa-cp.c (ipa_get_parm_lattices): Same.
+ (print_all_lattices): Same.
+ (ignore_edge_p): Same.
+ (build_toporder_info): Same.
+ (free_toporder_info): Same.
+ (push_node_to_stack): Same.
+ (ipcp_lattice<valtype>::set_contains_variable): Same.
+ (set_agg_lats_to_bottom): Same.
+ (ipcp_bits_lattice::meet_with): Same.
+ (set_single_call_flag): Same.
+ (initialize_node_lattices): Same.
+ (ipa_get_jf_ancestor_result): Same.
+ (ipcp_verify_propagated_values): Same.
+ (propagate_scalar_across_jump_function): Same.
+ (propagate_context_across_jump_function): Same.
+ (propagate_bits_across_jump_function): Same.
+ (ipa_vr_operation_and_type_effects): Same.
+ (propagate_vr_across_jump_function): Same.
+ (set_check_aggs_by_ref): Same.
+ (set_chain_of_aglats_contains_variable): Same.
+ (merge_aggregate_lattices): Same.
+ (agg_pass_through_permissible_p): Same.
+ (propagate_aggs_across_jump_function): Same.
+ (call_passes_through_thunk_p): Same.
+ (propagate_constants_across_call): Same.
+ (devirtualization_time_bonus): Same.
+ (good_cloning_opportunity_p): Same.
+ (context_independent_aggregate_values): Same.
+ (gather_context_independent_values): Same.
+ (perform_estimation_of_a_value): Same.
+ (estimate_local_effects): Same.
+ (value_topo_info<valtype>::add_val): Same.
+ (add_all_node_vals_to_toposort): Same.
+ (value_topo_info<valtype>::propagate_effects): Same.
+ (ipcp_propagate_stage): Same.
+ (ipcp_discover_new_direct_edges): Same.
+ (same_node_or_its_all_contexts_clone_p): Same.
+ (cgraph_edge_brings_value_p): Same.
+ (gather_edges_for_value): Same.
+ (create_specialized_node): Same.
+ (find_more_scalar_values_for_callers_subset): Same.
+ (find_more_contexts_for_caller_subset): Same.
+ (copy_plats_to_inter): Same.
+ (intersect_aggregates_with_edge): Same.
+ (find_aggregate_values_for_callers_subset): Same.
+ (cgraph_edge_brings_all_agg_vals_for_node): Same.
+ (decide_about_value): Same.
+ (decide_whether_version_node): Same.
+ (spread_undeadness): Same.
+ (identify_dead_nodes): Same.
+ (ipcp_store_vr_results): Same.
+ * ipa-devirt.c (final_warning_record::grow_type_warnings): Same.
+ * ipa-fnsummary.c (ipa_fn_summary::account_size_time): Same.
+ (redirect_to_unreachable): Same.
+ (edge_set_predicate): Same.
+ (evaluate_conditions_for_known_args): Same.
+ (evaluate_properties_for_edge): Same.
+ (ipa_fn_summary_t::duplicate): Same.
+ (ipa_call_summary_t::duplicate): Same.
+ (dump_ipa_call_summary): Same.
+ (ipa_dump_fn_summary): Same.
+ (eliminated_by_inlining_prob): Same.
+ (set_cond_stmt_execution_predicate): Same.
+ (set_switch_stmt_execution_predicate): Same.
+ (compute_bb_predicates): Same.
+ (will_be_nonconstant_expr_predicate): Same.
+ (phi_result_unknown_predicate): Same.
+ (analyze_function_body): Same.
+ (compute_fn_summary): Same.
+ (estimate_edge_devirt_benefit): Same.
+ (estimate_edge_size_and_time): Same.
+ (estimate_calls_size_and_time): Same.
+ (estimate_node_size_and_time): Same.
+ (remap_edge_change_prob): Same.
+ (remap_edge_summaries): Same.
+ (ipa_merge_fn_summary_after_inlining): Same.
+ (ipa_fn_summary_generate): Same.
+ (inline_read_section): Same.
+ (ipa_fn_summary_read): Same.
+ (ipa_fn_summary_write): Same.
+ * ipa-fnsummary.h: Same.
+ * ipa-hsa.c (ipa_hsa_read_section): Same.
+ * ipa-icf-gimple.c (func_checker::compare_loops): Same.
+ * ipa-icf.c (sem_function::param_used_p): Same.
+ * ipa-inline-analysis.c (do_estimate_edge_time): Same.
+ * ipa-inline.c (edge_badness): Same.
+ (inline_small_functions): Same.
+ * ipa-polymorphic-call.c
+ (ipa_polymorphic_call_context::stream_out): Same.
+ * ipa-predicate.c (predicate::remap_after_duplication): Same.
+ (predicate::remap_after_inlining): Same.
+ (predicate::stream_out): Same.
+ * ipa-predicate.h: Same.
+ * ipa-profile.c (ipa_profile_read_summary): Same.
+ * ipa-prop.c (ipa_get_param_decl_index_1): Same.
+ (count_formal_params): Same.
+ (ipa_dump_param): Same.
+ (ipa_alloc_node_params): Same.
+ (ipa_print_node_jump_functions_for_edge): Same.
+ (ipa_print_node_jump_functions): Same.
+ (ipa_load_from_parm_agg): Same.
+ (get_ancestor_addr_info): Same.
+ (ipa_compute_jump_functions_for_edge): Same.
+ (ipa_analyze_virtual_call_uses): Same.
+ (ipa_analyze_stmt_uses): Same.
+ (ipa_analyze_params_uses_in_bb): Same.
+ (update_jump_functions_after_inlining): Same.
+ (try_decrement_rdesc_refcount): Same.
+ (ipa_impossible_devirt_target): Same.
+ (update_indirect_edges_after_inlining): Same.
+ (combine_controlled_uses_counters): Same.
+ (ipa_edge_args_sum_t::duplicate): Same.
+ (ipa_write_jump_function): Same.
+ (ipa_write_indirect_edge_info): Same.
+ (ipa_write_node_info): Same.
+ (ipa_read_edge_info): Same.
+ (ipa_prop_read_section): Same.
+ (read_replacements_section): Same.
+ * ipa-prop.h (ipa_get_param_count): Same.
+ (ipa_get_param): Same.
+ (ipa_get_type): Same.
+ (ipa_get_param_move_cost): Same.
+ (ipa_set_param_used): Same.
+ (ipa_get_controlled_uses): Same.
+ (ipa_set_controlled_uses): Same.
+ (ipa_get_cs_argument_count): Same.
+ * ipa-pure-const.c (analyze_function): Same.
+ (pure_const_read_summary): Same.
+ * ipa-ref.h: Same.
+ * ipa-reference.c (ipa_reference_read_optimization_summary): Same.
+ * ipa-split.c (test_nonssa_use): Same.
+ (dump_split_point): Same.
+ (dominated_by_forbidden): Same.
+ (split_part_set_ssa_name_p): Same.
+ (find_split_points): Same.
+ * ira-build.c (finish_loop_tree_nodes): Same.
+ (low_pressure_loop_node_p): Same.
+ * ira-color.c (ira_reuse_stack_slot): Same.
+ * ira-int.h: Same.
+ * ira.c (setup_reg_equiv): Same.
+ (print_insn_chain): Same.
+ (ira): Same.
+ * loop-doloop.c (doloop_condition_get): Same.
+ (add_test): Same.
+ (record_reg_sets): Same.
+ (doloop_optimize): Same.
+ * loop-init.c (loop_optimizer_init): Same.
+ (fix_loop_structure): Same.
+ * loop-invariant.c (merge_identical_invariants): Same.
+ (compute_always_reached): Same.
+ (find_exits): Same.
+ (may_assign_reg_p): Same.
+ (find_invariants_bb): Same.
+ (find_invariants_body): Same.
+ (replace_uses): Same.
+ (can_move_invariant_reg): Same.
+ (free_inv_motion_data): Same.
+ (move_single_loop_invariants): Same.
+ (change_pressure): Same.
+ (mark_ref_regs): Same.
+ (calculate_loop_reg_pressure): Same.
+ * loop-iv.c (biv_entry_hasher::equal): Same.
+ (iv_extend_to_rtx_code): Same.
+ (check_iv_ref_table_size): Same.
+ (clear_iv_info): Same.
+ (latch_dominating_def): Same.
+ (iv_get_reaching_def): Same.
+ (iv_constant): Same.
+ (iv_subreg): Same.
+ (iv_extend): Same.
+ (iv_neg): Same.
+ (iv_add): Same.
+ (iv_mult): Same.
+ (get_biv_step): Same.
+ (record_iv): Same.
+ (analyzed_for_bivness_p): Same.
+ (record_biv): Same.
+ (iv_analyze_biv): Same.
+ (iv_analyze_expr): Same.
+ (iv_analyze_def): Same.
+ (iv_analyze_op): Same.
+ (iv_analyze): Same.
+ (iv_analyze_result): Same.
+ (biv_p): Same.
+ (eliminate_implied_conditions): Same.
+ (simplify_using_initial_values): Same.
+ (shorten_into_mode): Same.
+ (canonicalize_iv_subregs): Same.
+ (determine_max_iter): Same.
+ (check_simple_exit): Same.
+ (find_simple_exit): Same.
+ (get_simple_loop_desc): Same.
+ * loop-unroll.c (report_unroll): Same.
+ (decide_unrolling): Same.
+ (unroll_loops): Same.
+ (loop_exit_at_end_p): Same.
+ (decide_unroll_constant_iterations): Same.
+ (unroll_loop_constant_iterations): Same.
+ (compare_and_jump_seq): Same.
+ (unroll_loop_runtime_iterations): Same.
+ (decide_unroll_stupid): Same.
+ (unroll_loop_stupid): Same.
+ (referenced_in_one_insn_in_loop_p): Same.
+ (reset_debug_uses_in_loop): Same.
+ (analyze_iv_to_split_insn): Same.
+ * lra-eliminations.c (lra_debug_elim_table): Same.
+ (setup_can_eliminate): Same.
+ (form_sum): Same.
+ (lra_get_elimination_hard_regno): Same.
+ (lra_eliminate_regs_1): Same.
+ (eliminate_regs_in_insn): Same.
+ (update_reg_eliminate): Same.
+ (init_elimination): Same.
+ (lra_eliminate): Same.
+ * lra-int.h: Same.
+ * lra-lives.c (initiate_live_solver): Same.
+ * lra-remat.c (create_remat_bb_data): Same.
+ * lra-spills.c (lra_spill): Same.
+ * lra.c (lra_set_insn_recog_data): Same.
+ (lra_set_used_insn_alternative_by_uid): Same.
+ (init_reg_info): Same.
+ (expand_reg_info): Same.
+ * lto-cgraph.c (output_symtab): Same.
+ (read_identifier): Same.
+ (get_alias_symbol): Same.
+ (input_node): Same.
+ (input_varpool_node): Same.
+ (input_ref): Same.
+ (input_edge): Same.
+ (input_cgraph_1): Same.
+ (input_refs): Same.
+ (input_symtab): Same.
+ (input_offload_tables): Same.
+ (output_cgraph_opt_summary): Same.
+ (input_edge_opt_summary): Same.
+ (input_cgraph_opt_section): Same.
+ * lto-section-in.c (lto_free_raw_section_data): Same.
+ (lto_create_simple_input_block): Same.
+ (lto_free_function_in_decl_state_for_node): Same.
+ * lto-streamer-in.c (lto_tag_check_set): Same.
+ (lto_location_cache::revert_location_cache): Same.
+ (lto_location_cache::input_location): Same.
+ (lto_input_location): Same.
+ (stream_input_location_now): Same.
+ (lto_input_tree_ref): Same.
+ (lto_input_eh_catch_list): Same.
+ (input_eh_region): Same.
+ (lto_init_eh): Same.
+ (make_new_block): Same.
+ (input_cfg): Same.
+ (fixup_call_stmt_edges): Same.
+ (input_struct_function_base): Same.
+ (input_function): Same.
+ (lto_read_body_or_constructor): Same.
+ (lto_read_tree_1): Same.
+ (lto_read_tree): Same.
+ (lto_input_scc): Same.
+ (lto_input_tree_1): Same.
+ (lto_input_toplevel_asms): Same.
+ (lto_input_mode_table): Same.
+ (lto_reader_init): Same.
+ (lto_data_in_create): Same.
+ * lto-streamer-out.c (output_cfg): Same.
+ * lto-streamer.h: Same.
+ * modulo-sched.c (duplicate_insns_of_cycles): Same.
+ (generate_prolog_epilog): Same.
+ (mark_loop_unsched): Same.
+ (dump_insn_location): Same.
+ (loop_canon_p): Same.
+ (sms_schedule): Same.
+ * omp-expand.c (expand_omp_for_ordered_loops): Same.
+ (expand_omp_for_generic): Same.
+ (expand_omp_for_static_nochunk): Same.
+ (expand_omp_for_static_chunk): Same.
+ (expand_omp_simd): Same.
+ (expand_omp_taskloop_for_inner): Same.
+ (expand_oacc_for): Same.
+ (expand_omp_atomic_pipeline): Same.
+ (mark_loops_in_oacc_kernels_region): Same.
+ * omp-offload.c (oacc_xform_loop): Same.
+ * omp-simd-clone.c (simd_clone_adjust): Same.
+ * optabs-query.c (get_traditional_extraction_insn): Same.
+ * optabs.c (expand_vector_broadcast): Same.
+ (expand_binop_directly): Same.
+ (expand_twoval_unop): Same.
+ (expand_twoval_binop): Same.
+ (expand_unop_direct): Same.
+ (emit_indirect_jump): Same.
+ (emit_conditional_move): Same.
+ (emit_conditional_neg_or_complement): Same.
+ (emit_conditional_add): Same.
+ (vector_compare_rtx): Same.
+ (expand_vec_perm_1): Same.
+ (expand_vec_perm_const): Same.
+ (expand_vec_cond_expr): Same.
+ (expand_vec_series_expr): Same.
+ (maybe_emit_atomic_exchange): Same.
+ (maybe_emit_sync_lock_test_and_set): Same.
+ (expand_atomic_compare_and_swap): Same.
+ (expand_atomic_load): Same.
+ (expand_atomic_store): Same.
+ (maybe_emit_op): Same.
+ (valid_multiword_target_p): Same.
+ (create_integer_operand): Same.
+ (maybe_legitimize_operand_same_code): Same.
+ (maybe_legitimize_operand): Same.
+ (create_convert_operand_from_type): Same.
+ (can_reuse_operands_p): Same.
+ (maybe_legitimize_operands): Same.
+ (maybe_gen_insn): Same.
+ (maybe_expand_insn): Same.
+ (maybe_expand_jump_insn): Same.
+ (expand_insn): Same.
+ * optabs.h (create_expand_operand): Same.
+ (create_fixed_operand): Same.
+ (create_output_operand): Same.
+ (create_input_operand): Same.
+ (create_convert_operand_to): Same.
+ (create_convert_operand_from): Same.
+ * optinfo.h: Same.
+ * poly-int.h: Same.
+ * predict.c (optimize_insn_for_speed_p): Same.
+ (optimize_loop_for_size_p): Same.
+ (optimize_loop_for_speed_p): Same.
+ (optimize_loop_nest_for_speed_p): Same.
+ (get_base_value): Same.
+ (predicted_by_loop_heuristics_p): Same.
+ (predict_extra_loop_exits): Same.
+ (predict_loops): Same.
+ (predict_paths_for_bb): Same.
+ (predict_paths_leading_to): Same.
+ (propagate_freq): Same.
+ (pass_profile::execute): Same.
+ * predict.h: Same.
+ * profile-count.c (profile_count::differs_from_p): Same.
+ (profile_probability::differs_lot_from_p): Same.
+ * profile-count.h: Same.
+ * profile.c (branch_prob): Same.
+ * regrename.c (free_chain_data): Same.
+ (mark_conflict): Same.
+ (create_new_chain): Same.
+ (merge_overlapping_regs): Same.
+ (init_rename_info): Same.
+ (merge_chains): Same.
+ (regrename_analyze): Same.
+ (regrename_do_replace): Same.
+ (scan_rtx_reg): Same.
+ (record_out_operands): Same.
+ (build_def_use): Same.
+ * regrename.h: Same.
+ * reload.h: Same.
+ * reload1.c (init_reload): Same.
+ (maybe_fix_stack_asms): Same.
+ (copy_reloads): Same.
+ (count_pseudo): Same.
+ (count_spilled_pseudo): Same.
+ (find_reg): Same.
+ (find_reload_regs): Same.
+ (select_reload_regs): Same.
+ (spill_hard_reg): Same.
+ (fixup_eh_region_note): Same.
+ (set_reload_reg): Same.
+ (allocate_reload_reg): Same.
+ (compute_reload_subreg_offset): Same.
+ (reload_adjust_reg_for_icode): Same.
+ (emit_input_reload_insns): Same.
+ (emit_output_reload_insns): Same.
+ (do_input_reload): Same.
+ (inherit_piecemeal_p): Same.
+ * rtl.h: Same.
+ * sanopt.c (maybe_get_dominating_check): Same.
+ (maybe_optimize_ubsan_ptr_ifn): Same.
+ (can_remove_asan_check): Same.
+ (maybe_optimize_asan_check_ifn): Same.
+ (sanopt_optimize_walker): Same.
+ * sched-deps.c (add_dependence_list): Same.
+ (chain_to_prev_insn): Same.
+ (add_insn_mem_dependence): Same.
+ (create_insn_reg_set): Same.
+ (maybe_extend_reg_info_p): Same.
+ (sched_analyze_reg): Same.
+ (sched_analyze_1): Same.
+ (get_implicit_reg_pending_clobbers): Same.
+ (chain_to_prev_insn_p): Same.
+ (deps_analyze_insn): Same.
+ (deps_start_bb): Same.
+ (sched_free_deps): Same.
+ (init_deps): Same.
+ (init_deps_reg_last): Same.
+ (free_deps): Same.
+ * sched-ebb.c: Same.
+ * sched-int.h: Same.
+ * sched-rgn.c (add_branch_dependences): Same.
+ (concat_insn_mem_list): Same.
+ (deps_join): Same.
+ (sched_rgn_compute_dependencies): Same.
+ * sel-sched-ir.c (reset_target_context): Same.
+ (copy_deps_context): Same.
+ (init_id_from_df): Same.
+ (has_dependence_p): Same.
+ (change_loops_latches): Same.
+ (bb_top_order_comparator): Same.
+ (make_region_from_loop_preheader): Same.
+ (sel_init_pipelining): Same.
+ (get_loop_nest_for_rgn): Same.
+ (make_regions_from_the_rest): Same.
+ (sel_is_loop_preheader_p): Same.
+ * sel-sched-ir.h (inner_loop_header_p): Same.
+ (get_all_loop_exits): Same.
+ * selftest.h: Same.
+ * sese.c (sese_build_liveouts): Same.
+ (sese_insert_phis_for_liveouts): Same.
+ * sese.h (defined_in_sese_p): Same.
+ * sreal.c (sreal::stream_out): Same.
+ * sreal.h: Same.
+ * streamer-hooks.h: Same.
+ * target-globals.c (save_target_globals): Same.
+ * target-globals.h: Same.
+ * target.def: Same.
+ * target.h: Same.
+ * targhooks.c (default_has_ifunc_p): Same.
+ (default_empty_mask_is_expensive): Same.
+ (default_init_cost): Same.
+ * targhooks.h: Same.
+ * toplev.c: Same.
+ * tree-affine.c (aff_combination_mult): Same.
+ (aff_combination_expand): Same.
+ (aff_combination_constant_multiple_p): Same.
+ * tree-affine.h: Same.
+ * tree-cfg.c (build_gimple_cfg): Same.
+ (replace_loop_annotate_in_block): Same.
+ (replace_uses_by): Same.
+ (remove_bb): Same.
+ (dump_cfg_stats): Same.
+ (gimple_duplicate_sese_region): Same.
+ (gimple_duplicate_sese_tail): Same.
+ (move_block_to_fn): Same.
+ (replace_block_vars_by_duplicates): Same.
+ (move_sese_region_to_fn): Same.
+ (print_loops_bb): Same.
+ (print_loop): Same.
+ (print_loops): Same.
+ (debug): Same.
+ (debug_loops): Same.
+ * tree-cfg.h: Same.
+ * tree-chrec.c (chrec_fold_plus_poly_poly): Same.
+ (chrec_fold_multiply_poly_poly): Same.
+ (chrec_evaluate): Same.
+ (chrec_component_in_loop_num): Same.
+ (reset_evolution_in_loop): Same.
+ (is_multivariate_chrec): Same.
+ (chrec_contains_symbols): Same.
+ (nb_vars_in_chrec): Same.
+ (chrec_convert_1): Same.
+ (chrec_convert_aggressive): Same.
+ * tree-chrec.h: Same.
+ * tree-core.h: Same.
+ * tree-data-ref.c (dump_data_dependence_relation): Same.
+ (canonicalize_base_object_address): Same.
+ (data_ref_compare_tree): Same.
+ (prune_runtime_alias_test_list): Same.
+ (get_segment_min_max): Same.
+ (create_intersect_range_checks): Same.
+ (conflict_fn_no_dependence): Same.
+ (object_address_invariant_in_loop_p): Same.
+ (analyze_ziv_subscript): Same.
+ (analyze_siv_subscript_cst_affine): Same.
+ (analyze_miv_subscript): Same.
+ (analyze_overlapping_iterations): Same.
+ (build_classic_dist_vector_1): Same.
+ (add_other_self_distances): Same.
+ (same_access_functions): Same.
+ (build_classic_dir_vector): Same.
+ (subscript_dependence_tester_1): Same.
+ (subscript_dependence_tester): Same.
+ (access_functions_are_affine_or_constant_p): Same.
+ (get_references_in_stmt): Same.
+ (loop_nest_has_data_refs): Same.
+ (graphite_find_data_references_in_stmt): Same.
+ (find_data_references_in_bb): Same.
+ (get_base_for_alignment): Same.
+ (find_loop_nest_1): Same.
+ (find_loop_nest): Same.
+ * tree-data-ref.h (dr_alignment): Same.
+ (ddr_dependence_level): Same.
+ * tree-if-conv.c (fold_build_cond_expr): Same.
+ (add_to_predicate_list): Same.
+ (add_to_dst_predicate_list): Same.
+ (phi_convertible_by_degenerating_args): Same.
+ (idx_within_array_bound): Same.
+ (all_preds_critical_p): Same.
+ (pred_blocks_visited_p): Same.
+ (predicate_bbs): Same.
+ (build_region): Same.
+ (if_convertible_loop_p_1): Same.
+ (is_cond_scalar_reduction): Same.
+ (predicate_scalar_phi): Same.
+ (remove_conditions_and_labels): Same.
+ (combine_blocks): Same.
+ (version_loop_for_if_conversion): Same.
+ (versionable_outer_loop_p): Same.
+ (ifcvt_local_dce): Same.
+ (tree_if_conversion): Same.
+ (pass_if_conversion::gate): Same.
+ * tree-if-conv.h: Same.
+ * tree-inline.c (maybe_move_debug_stmts_to_successors): Same.
+ * tree-loop-distribution.c (bb_top_order_cmp): Same.
+ (free_rdg): Same.
+ (stmt_has_scalar_dependences_outside_loop): Same.
+ (copy_loop_before): Same.
+ (create_bb_after_loop): Same.
+ (const_with_all_bytes_same): Same.
+ (generate_memset_builtin): Same.
+ (generate_memcpy_builtin): Same.
+ (destroy_loop): Same.
+ (build_rdg_partition_for_vertex): Same.
+ (compute_access_range): Same.
+ (data_ref_segment_size): Same.
+ (latch_dominated_by_data_ref): Same.
+ (compute_alias_check_pairs): Same.
+ (fuse_memset_builtins): Same.
+ (finalize_partitions): Same.
+ (find_seed_stmts_for_distribution): Same.
+ (prepare_perfect_loop_nest): Same.
+ * tree-parloops.c (lambda_transform_legal_p): Same.
+ (loop_parallel_p): Same.
+ (reduc_stmt_res): Same.
+ (add_field_for_name): Same.
+ (create_call_for_reduction_1): Same.
+ (replace_uses_in_bb_by): Same.
+ (transform_to_exit_first_loop_alt): Same.
+ (try_transform_to_exit_first_loop_alt): Same.
+ (transform_to_exit_first_loop): Same.
+ (num_phis): Same.
+ (gen_parallel_loop): Same.
+ (gather_scalar_reductions): Same.
+ (get_omp_data_i_param): Same.
+ (try_create_reduction_list): Same.
+ (oacc_entry_exit_single_gang): Same.
+ (parallelize_loops): Same.
+ * tree-pass.h: Same.
+ * tree-predcom.c (determine_offset): Same.
+ (last_always_executed_block): Same.
+ (split_data_refs_to_components): Same.
+ (suitable_component_p): Same.
+ (valid_initializer_p): Same.
+ (find_looparound_phi): Same.
+ (insert_looparound_copy): Same.
+ (add_looparound_copies): Same.
+ (determine_roots_comp): Same.
+ (predcom_tmp_var): Same.
+ (initialize_root_vars): Same.
+ (initialize_root_vars_store_elim_1): Same.
+ (initialize_root_vars_store_elim_2): Same.
+ (finalize_eliminated_stores): Same.
+ (initialize_root_vars_lm): Same.
+ (remove_stmt): Same.
+ (determine_unroll_factor): Same.
+ (execute_pred_commoning_cbck): Same.
+ (base_names_in_chain_on): Same.
+ (combine_chains): Same.
+ (pcom_stmt_dominates_stmt_p): Same.
+ (try_combine_chains): Same.
+ (prepare_initializers_chain_store_elim): Same.
+ (prepare_initializers_chain): Same.
+ (prepare_initializers): Same.
+ (prepare_finalizers_chain): Same.
+ (prepare_finalizers): Same.
+ (insert_init_seqs): Same.
+ * tree-scalar-evolution.c (loop_phi_node_p): Same.
+ (compute_overall_effect_of_inner_loop): Same.
+ (add_to_evolution_1): Same.
+ (add_to_evolution): Same.
+ (follow_ssa_edge_binary): Same.
+ (follow_ssa_edge_expr): Same.
+ (backedge_phi_arg_p): Same.
+ (follow_ssa_edge_in_condition_phi_branch): Same.
+ (follow_ssa_edge_in_condition_phi): Same.
+ (follow_ssa_edge_inner_loop_phi): Same.
+ (follow_ssa_edge): Same.
+ (analyze_evolution_in_loop): Same.
+ (analyze_initial_condition): Same.
+ (interpret_loop_phi): Same.
+ (interpret_condition_phi): Same.
+ (interpret_rhs_expr): Same.
+ (interpret_expr): Same.
+ (interpret_gimple_assign): Same.
+ (analyze_scalar_evolution_1): Same.
+ (analyze_scalar_evolution): Same.
+ (analyze_scalar_evolution_for_address_of): Same.
+ (get_instantiated_value_entry): Same.
+ (loop_closed_phi_def): Same.
+ (instantiate_scev_name): Same.
+ (instantiate_scev_poly): Same.
+ (instantiate_scev_binary): Same.
+ (instantiate_scev_convert): Same.
+ (instantiate_scev_not): Same.
+ (instantiate_scev_r): Same.
+ (instantiate_scev): Same.
+ (resolve_mixers): Same.
+ (initialize_scalar_evolutions_analyzer): Same.
+ (scev_reset_htab): Same.
+ (scev_reset): Same.
+ (derive_simple_iv_with_niters): Same.
+ (simple_iv_with_niters): Same.
+ (expression_expensive_p): Same.
+ (final_value_replacement_loop): Same.
+ * tree-scalar-evolution.h (block_before_loop): Same.
+ * tree-ssa-address.h: Same.
+ * tree-ssa-dce.c (find_obviously_necessary_stmts): Same.
+ * tree-ssa-dom.c (edge_info::record_simple_equiv): Same.
+ (record_edge_info): Same.
+ * tree-ssa-live.c (var_map_base_fini): Same.
+ (remove_unused_locals): Same.
+ * tree-ssa-live.h: Same.
+ * tree-ssa-loop-ch.c (should_duplicate_loop_header_p): Same.
+ (pass_ch_vect::execute): Same.
+ (pass_ch::process_loop_p): Same.
+ * tree-ssa-loop-im.c (mem_ref_hasher::hash): Same.
+ (movement_possibility): Same.
+ (outermost_invariant_loop): Same.
+ (stmt_cost): Same.
+ (determine_max_movement): Same.
+ (invariantness_dom_walker::before_dom_children): Same.
+ (move_computations): Same.
+ (may_move_till): Same.
+ (force_move_till_op): Same.
+ (force_move_till): Same.
+ (memref_free): Same.
+ (record_mem_ref_loc): Same.
+ (set_ref_stored_in_loop): Same.
+ (mark_ref_stored): Same.
+ (sort_bbs_in_loop_postorder_cmp): Same.
+ (sort_locs_in_loop_postorder_cmp): Same.
+ (analyze_memory_references): Same.
+ (mem_refs_may_alias_p): Same.
+ (find_ref_loc_in_loop_cmp): Same.
+ (rewrite_mem_ref_loc::operator): Same.
+ (first_mem_ref_loc_1::operator): Same.
+ (sm_set_flag_if_changed::operator): Same.
+ (execute_sm_if_changed_flag_set): Same.
+ (execute_sm): Same.
+ (hoist_memory_references): Same.
+ (ref_always_accessed::operator): Same.
+ (refs_independent_p): Same.
+ (record_dep_loop): Same.
+ (ref_indep_loop_p_1): Same.
+ (ref_indep_loop_p): Same.
+ (can_sm_ref_p): Same.
+ (find_refs_for_sm): Same.
+ (loop_suitable_for_sm): Same.
+ (store_motion_loop): Same.
+ (store_motion): Same.
+ (fill_always_executed_in): Same.
+ * tree-ssa-loop-ivcanon.c (constant_after_peeling): Same.
+ (estimated_unrolled_size): Same.
+ (loop_edge_to_cancel): Same.
+ (remove_exits_and_undefined_stmts): Same.
+ (remove_redundant_iv_tests): Same.
+ (unloop_loops): Same.
+ (estimated_peeled_sequence_size): Same.
+ (try_peel_loop): Same.
+ (canonicalize_loop_induction_variables): Same.
+ (canonicalize_induction_variables): Same.
+ * tree-ssa-loop-ivopts.c (iv_inv_expr_hasher::equal): Same.
+ (name_info): Same.
+ (stmt_after_inc_pos): Same.
+ (contains_abnormal_ssa_name_p): Same.
+ (niter_for_exit): Same.
+ (find_bivs): Same.
+ (mark_bivs): Same.
+ (find_givs_in_bb): Same.
+ (find_induction_variables): Same.
+ (find_interesting_uses_cond): Same.
+ (outermost_invariant_loop_for_expr): Same.
+ (idx_find_step): Same.
+ (add_candidate_1): Same.
+ (add_iv_candidate_derived_from_uses): Same.
+ (alloc_use_cost_map): Same.
+ (prepare_decl_rtl): Same.
+ (generic_predict_doloop_p): Same.
+ (computation_cost): Same.
+ (determine_common_wider_type): Same.
+ (get_computation_aff_1): Same.
+ (get_use_type): Same.
+ (determine_group_iv_cost_address): Same.
+ (iv_period): Same.
+ (difference_cannot_overflow_p): Same.
+ (may_eliminate_iv): Same.
+ (determine_set_costs): Same.
+ (cheaper_cost_pair): Same.
+ (compare_cost_pair): Same.
+ (iv_ca_cand_for_group): Same.
+ (iv_ca_recount_cost): Same.
+ (iv_ca_set_remove_invs): Same.
+ (iv_ca_set_no_cp): Same.
+ (iv_ca_set_add_invs): Same.
+ (iv_ca_set_cp): Same.
+ (iv_ca_add_group): Same.
+ (iv_ca_cost): Same.
+ (iv_ca_compare_deps): Same.
+ (iv_ca_delta_reverse): Same.
+ (iv_ca_delta_commit): Same.
+ (iv_ca_cand_used_p): Same.
+ (iv_ca_delta_free): Same.
+ (iv_ca_new): Same.
+ (iv_ca_free): Same.
+ (iv_ca_dump): Same.
+ (iv_ca_extend): Same.
+ (iv_ca_narrow): Same.
+ (iv_ca_prune): Same.
+ (cheaper_cost_with_cand): Same.
+ (iv_ca_replace): Same.
+ (try_add_cand_for): Same.
+ (get_initial_solution): Same.
+ (try_improve_iv_set): Same.
+ (find_optimal_iv_set_1): Same.
+ (create_new_iv): Same.
+ (rewrite_use_compare): Same.
+ (remove_unused_ivs): Same.
+ (determine_scaling_factor): Same.
+ * tree-ssa-loop-ivopts.h: Same.
+ * tree-ssa-loop-manip.c (create_iv): Same.
+ (compute_live_loop_exits): Same.
+ (add_exit_phi): Same.
+ (add_exit_phis): Same.
+ (find_uses_to_rename_use): Same.
+ (find_uses_to_rename_def): Same.
+ (find_uses_to_rename_in_loop): Same.
+ (rewrite_into_loop_closed_ssa): Same.
+ (check_loop_closed_ssa_bb): Same.
+ (split_loop_exit_edge): Same.
+ (ip_end_pos): Same.
+ (ip_normal_pos): Same.
+ (copy_phi_node_args): Same.
+ (gimple_duplicate_loop_to_header_edge): Same.
+ (can_unroll_loop_p): Same.
+ (determine_exit_conditions): Same.
+ (scale_dominated_blocks_in_loop): Same.
+ (niter_for_unrolled_loop): Same.
+ (tree_transform_and_unroll_loop): Same.
+ (rewrite_all_phi_nodes_with_iv): Same.
+ * tree-ssa-loop-manip.h: Same.
+ * tree-ssa-loop-niter.c (number_of_iterations_ne_max): Same.
+ (number_of_iterations_ne): Same.
+ (assert_no_overflow_lt): Same.
+ (assert_loop_rolls_lt): Same.
+ (number_of_iterations_lt): Same.
+ (adjust_cond_for_loop_until_wrap): Same.
+ (tree_simplify_using_condition): Same.
+ (simplify_using_initial_conditions): Same.
+ (simplify_using_outer_evolutions): Same.
+ (loop_only_exit_p): Same.
+ (ssa_defined_by_minus_one_stmt_p): Same.
+ (number_of_iterations_popcount): Same.
+ (number_of_iterations_exit): Same.
+ (find_loop_niter): Same.
+ (finite_loop_p): Same.
+ (chain_of_csts_start): Same.
+ (get_val_for): Same.
+ (loop_niter_by_eval): Same.
+ (derive_constant_upper_bound_ops): Same.
+ (do_warn_aggressive_loop_optimizations): Same.
+ (record_estimate): Same.
+ (get_cst_init_from_scev): Same.
+ (record_nonwrapping_iv): Same.
+ (idx_infer_loop_bounds): Same.
+ (infer_loop_bounds_from_ref): Same.
+ (infer_loop_bounds_from_array): Same.
+ (infer_loop_bounds_from_pointer_arith): Same.
+ (infer_loop_bounds_from_signedness): Same.
+ (bound_index): Same.
+ (discover_iteration_bound_by_body_walk): Same.
+ (maybe_lower_iteration_bound): Same.
+ (estimate_numbers_of_iterations): Same.
+ (estimated_loop_iterations): Same.
+ (estimated_loop_iterations_int): Same.
+ (max_loop_iterations): Same.
+ (max_loop_iterations_int): Same.
+ (likely_max_loop_iterations): Same.
+ (likely_max_loop_iterations_int): Same.
+ (estimated_stmt_executions_int): Same.
+ (max_stmt_executions): Same.
+ (likely_max_stmt_executions): Same.
+ (estimated_stmt_executions): Same.
+ (stmt_dominates_stmt_p): Same.
+ (nowrap_type_p): Same.
+ (loop_exits_before_overflow): Same.
+ (scev_var_range_cant_overflow): Same.
+ (scev_probably_wraps_p): Same.
+ (free_numbers_of_iterations_estimates): Same.
+ * tree-ssa-loop-niter.h: Same.
+ * tree-ssa-loop-prefetch.c (release_mem_refs): Same.
+ (idx_analyze_ref): Same.
+ (analyze_ref): Same.
+ (gather_memory_references_ref): Same.
+ (mark_nontemporal_store): Same.
+ (emit_mfence_after_loop): Same.
+ (may_use_storent_in_loop_p): Same.
+ (mark_nontemporal_stores): Same.
+ (should_unroll_loop_p): Same.
+ (volume_of_dist_vector): Same.
+ (add_subscript_strides): Same.
+ (self_reuse_distance): Same.
+ (insn_to_prefetch_ratio_too_small_p): Same.
+ * tree-ssa-loop-split.c (split_at_bb_p): Same.
+ (patch_loop_exit): Same.
+ (find_or_create_guard_phi): Same.
+ (easy_exit_values): Same.
+ (connect_loop_phis): Same.
+ (connect_loops): Same.
+ (compute_new_first_bound): Same.
+ (split_loop): Same.
+ (tree_ssa_split_loops): Same.
+ * tree-ssa-loop-unswitch.c (tree_ssa_unswitch_loops): Same.
+ (is_maybe_undefined): Same.
+ (tree_may_unswitch_on): Same.
+ (simplify_using_entry_checks): Same.
+ (tree_unswitch_single_loop): Same.
+ (tree_unswitch_loop): Same.
+ (tree_unswitch_outer_loop): Same.
+ (empty_bb_without_guard_p): Same.
+ (used_outside_loop_p): Same.
+ (get_vop_from_header): Same.
+ (hoist_guard): Same.
+ * tree-ssa-loop.c (gate_oacc_kernels): Same.
+ (get_lsm_tmp_name): Same.
+ * tree-ssa-loop.h: Same.
+ * tree-ssa-reassoc.c (add_repeat_to_ops_vec): Same.
+ (build_and_add_sum): Same.
+ (no_side_effect_bb): Same.
+ (get_ops): Same.
+ (linearize_expr): Same.
+ (should_break_up_subtract): Same.
+ (linearize_expr_tree): Same.
+ * tree-ssa-scopedtables.c: Same.
+ * tree-ssa-scopedtables.h: Same.
+ * tree-ssa-structalias.c (condense_visit): Same.
+ (label_visit): Same.
+ (dump_pred_graph): Same.
+ (perform_var_substitution): Same.
+ (move_complex_constraints): Same.
+ (remove_preds_and_fake_succs): Same.
+ * tree-ssa-threadupdate.c (dbds_continue_enumeration_p): Same.
+ (determine_bb_domination_status): Same.
+ (duplicate_thread_path): Same.
+ (thread_through_all_blocks): Same.
+ * tree-ssa-threadupdate.h: Same.
+ * tree-streamer-in.c (streamer_read_string_cst): Same.
+ (input_identifier): Same.
+ (unpack_ts_type_common_value_fields): Same.
+ (unpack_ts_block_value_fields): Same.
+ (unpack_ts_translation_unit_decl_value_fields): Same.
+ (unpack_ts_omp_clause_value_fields): Same.
+ (streamer_read_tree_bitfields): Same.
+ (streamer_alloc_tree): Same.
+ (lto_input_ts_common_tree_pointers): Same.
+ (lto_input_ts_vector_tree_pointers): Same.
+ (lto_input_ts_poly_tree_pointers): Same.
+ (lto_input_ts_complex_tree_pointers): Same.
+ (lto_input_ts_decl_minimal_tree_pointers): Same.
+ (lto_input_ts_decl_common_tree_pointers): Same.
+ (lto_input_ts_decl_non_common_tree_pointers): Same.
+ (lto_input_ts_decl_with_vis_tree_pointers): Same.
+ (lto_input_ts_field_decl_tree_pointers): Same.
+ (lto_input_ts_function_decl_tree_pointers): Same.
+ (lto_input_ts_type_common_tree_pointers): Same.
+ (lto_input_ts_type_non_common_tree_pointers): Same.
+ (lto_input_ts_list_tree_pointers): Same.
+ (lto_input_ts_vec_tree_pointers): Same.
+ (lto_input_ts_exp_tree_pointers): Same.
+ (lto_input_ts_block_tree_pointers): Same.
+ (lto_input_ts_binfo_tree_pointers): Same.
+ (lto_input_ts_constructor_tree_pointers): Same.
+ (lto_input_ts_omp_clause_tree_pointers): Same.
+ (streamer_read_tree_body): Same.
+ * tree-streamer.h: Same.
+ * tree-switch-conversion.c (bit_test_cluster::is_beneficial): Same.
+ * tree-vect-data-refs.c (vect_get_smallest_scalar_type): Same.
+ (vect_analyze_possibly_independent_ddr): Same.
+ (vect_analyze_data_ref_dependence): Same.
+ (vect_compute_data_ref_alignment): Same.
+ (vect_enhance_data_refs_alignment): Same.
+ (vect_analyze_data_ref_access): Same.
+ (vect_check_gather_scatter): Same.
+ (vect_find_stmt_data_reference): Same.
+ (vect_create_addr_base_for_vector_ref): Same.
+ (vect_setup_realignment): Same.
+ (vect_supportable_dr_alignment): Same.
+ * tree-vect-loop-manip.c (rename_variables_in_bb): Same.
+ (adjust_phi_and_debug_stmts): Same.
+ (vect_set_loop_mask): Same.
+ (add_preheader_seq): Same.
+ (vect_maybe_permute_loop_masks): Same.
+ (vect_set_loop_masks_directly): Same.
+ (vect_set_loop_condition_masked): Same.
+ (vect_set_loop_condition_unmasked): Same.
+ (slpeel_duplicate_current_defs_from_edges): Same.
+ (slpeel_add_loop_guard): Same.
+ (slpeel_can_duplicate_loop_p): Same.
+ (create_lcssa_for_virtual_phi): Same.
+ (iv_phi_p): Same.
+ (vect_update_ivs_after_vectorizer): Same.
+ (vect_gen_vector_loop_niters_mult_vf): Same.
+ (slpeel_update_phi_nodes_for_loops): Same.
+ (slpeel_update_phi_nodes_for_guard1): Same.
+ (find_guard_arg): Same.
+ (slpeel_update_phi_nodes_for_guard2): Same.
+ (slpeel_update_phi_nodes_for_lcssa): Same.
+ (vect_do_peeling): Same.
+ (vect_create_cond_for_alias_checks): Same.
+ (vect_loop_versioning): Same.
+ * tree-vect-loop.c (vect_determine_vf_for_stmt): Same.
+ (vect_inner_phi_in_double_reduction_p): Same.
+ (vect_analyze_scalar_cycles_1): Same.
+ (vect_fixup_scalar_cycles_with_patterns): Same.
+ (vect_get_loop_niters): Same.
+ (bb_in_loop_p): Same.
+ (vect_get_max_nscalars_per_iter): Same.
+ (vect_verify_full_masking): Same.
+ (vect_compute_single_scalar_iteration_cost): Same.
+ (vect_analyze_loop_form_1): Same.
+ (vect_analyze_loop_form): Same.
+ (vect_active_double_reduction_p): Same.
+ (vect_analyze_loop_operations): Same.
+ (neutral_op_for_slp_reduction): Same.
+ (vect_is_simple_reduction): Same.
+ (vect_model_reduction_cost): Same.
+ (get_initial_def_for_reduction): Same.
+ (get_initial_defs_for_reduction): Same.
+ (vect_create_epilog_for_reduction): Same.
+ (vectorize_fold_left_reduction): Same.
+ (vectorizable_reduction): Same.
+ (vectorizable_induction): Same.
+ (vectorizable_live_operation): Same.
+ (loop_niters_no_overflow): Same.
+ (vect_get_loop_mask): Same.
+ (vect_transform_loop_stmt): Same.
+ (vect_transform_loop): Same.
+ * tree-vect-patterns.c (vect_reassociating_reduction_p): Same.
+ (vect_determine_precisions): Same.
+ (vect_pattern_recog_1): Same.
+ * tree-vect-slp.c (vect_analyze_slp_instance): Same.
+ * tree-vect-stmts.c (stmt_vectype): Same.
+ (process_use): Same.
+ (vect_init_vector_1): Same.
+ (vect_truncate_gather_scatter_offset): Same.
+ (get_group_load_store_type): Same.
+ (vect_build_gather_load_calls): Same.
+ (vect_get_strided_load_store_ops): Same.
+ (vectorizable_simd_clone_call): Same.
+ (vectorizable_store): Same.
+ (permute_vec_elements): Same.
+ (vectorizable_load): Same.
+ (vect_transform_stmt): Same.
+ (supportable_widening_operation): Same.
+ * tree-vectorizer.c (vec_info::replace_stmt): Same.
+ (vec_info::free_stmt_vec_info): Same.
+ (vect_free_loop_info_assumptions): Same.
+ (vect_loop_vectorized_call): Same.
+ (set_uid_loop_bbs): Same.
+ (vectorize_loops): Same.
+ * tree-vectorizer.h (STMT_VINFO_BB_VINFO): Same.
+ * tree.c (add_tree_to_fld_list): Same.
+ (fld_type_variant_equal_p): Same.
+ (fld_decl_context): Same.
+ (fld_incomplete_type_of): Same.
+ (free_lang_data_in_binfo): Same.
+ (need_assembler_name_p): Same.
+ (find_decls_types_r): Same.
+ (get_eh_types_for_runtime): Same.
+ (find_decls_types_in_eh_region): Same.
+ (find_decls_types_in_node): Same.
+ (assign_assembler_name_if_needed): Same.
+ * value-prof.c (stream_out_histogram_value): Same.
+ * value-prof.h: Same.
+ * var-tracking.c (use_narrower_mode): Same.
+ (prepare_call_arguments): Same.
+ (vt_expand_loc_callback): Same.
+ (resolve_expansions_pending_recursion): Same.
+ (vt_expand_loc): Same.
+ * varasm.c (const_hash_1): Same.
+ (compare_constant): Same.
+ (tree_output_constant_def): Same.
+ (simplify_subtraction): Same.
+ (get_pool_constant): Same.
+ (output_constant_pool_2): Same.
+ (output_constant_pool_1): Same.
+ (mark_constants_in_pattern): Same.
+ (mark_constant_pool): Same.
+ (get_section_anchor): Same.
+ * vr-values.c (compare_range_with_value): Same.
+ (vr_values::extract_range_from_phi_node): Same.
+ * vr-values.h: Same.
+ * web.c (unionfind_union): Same.
+ * wide-int.h: Same.
+
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
* align.h: Change class-key from class to struct and vice versa
to match convention and avoid -Wclass-is-pod and -Wstruct-no-pod.
* alloc-pool.h: Same.
diff --git a/gcc/auto-profile.c b/gcc/auto-profile.c
index 59100a0a47d..ee1a83abce2 100644
--- a/gcc/auto-profile.c
+++ b/gcc/auto-profile.c
@@ -104,7 +104,7 @@ namespace autofdo
/* Intermediate edge info used when propagating AutoFDO profile information.
We can't use edge->count() directly since it's computed from the edge's
probability, which is not yet decided during propagation. */
-#define AFDO_EINFO(e) ((struct edge_info *) e->aux)
+#define AFDO_EINFO(e) ((class edge_info *) e->aux)
class edge_info
{
public:
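
For context on what this hunk normalizes: edge_info is defined with the class key, so the AFDO_EINFO cast now spells the same key. Below is a minimal standalone sketch of that rule using hypothetical names (edge_info_demo is not GCC's edge_info), intended for the -Wmismatched-tags warning this patch series introduces.

// Hypothetical stand-in for the pattern; not GCC code.
class edge_info_demo            // defined with the 'class' key
{
public:
  int probability = 0;
};

static edge_info_demo storage;
static void *aux = &storage;    // plays the role of edge->aux

int
main ()
{
  // Redeclaring the type as 'struct edge_info_demo' elsewhere is what
  // -Wmismatched-tags diagnoses; the patch therefore writes every
  // mention, including this cast, with the matching key.
  class edge_info_demo *e = (class edge_info_demo *) aux;
  return e->probability;
}
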
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 964f2bd5784..5e0fbc0e2b3 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -123,7 +123,7 @@ struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_d
PTR GTY ((skip (""))) aux;
/* Innermost loop containing the block. */
- struct loop *loop_father;
+ class loop *loop_father;
/* The dominance and postdominance information node. */
struct et_node * GTY ((skip (""))) dom[2];
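
The same consistency applies across headers: loop is evidently declared with the class key elsewhere in this patch (cfgloop.h), so members that point to it follow suit. A small sketch of the two-header situation with made-up names, not the real basic_block_def or loop types:

// Hypothetical two-header situation; names are illustrative only.
class loop_demo;                // forward declaration uses 'class'

struct basic_block_demo
{
  // 'struct loop_demo *' here would disagree with the declaration above
  // and draw a -Wmismatched-tags warning; 'class' matches it.
  class loop_demo *loop_father;
  basic_block_demo *next_bb;
};
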
diff --git a/gcc/bitmap.c b/gcc/bitmap.c
index 894aefa13de..c99d6465ed4 100644
--- a/gcc/bitmap.c
+++ b/gcc/bitmap.c
@@ -775,7 +775,7 @@ bitmap_alloc (bitmap_obstack *bit_obstack MEM_STAT_DECL)
bit_obstack = &bitmap_default_obstack;
map = bit_obstack->heads;
if (map)
- bit_obstack->heads = (struct bitmap_head *) map->first;
+ bit_obstack->heads = (class bitmap_head *) map->first;
else
map = XOBNEW (&bit_obstack->obstack, bitmap_head);
bitmap_initialize (map, bit_obstack PASS_MEM_STAT);
diff --git a/gcc/bitmap.h b/gcc/bitmap.h
index 0e3ffc8862f..b0ca7b96c94 100644
--- a/gcc/bitmap.h
+++ b/gcc/bitmap.h
@@ -290,7 +290,7 @@ typedef unsigned long BITMAP_WORD;
/* Obstack for allocating bitmaps and elements from. */
struct bitmap_obstack {
struct bitmap_element *elements;
- struct bitmap_head *heads;
+ bitmap_head *heads;
struct obstack obstack;
};
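
bitmap.h takes the other route the patch uses: instead of switching the key, it drops the elaborated-type-specifier altogether, since a type that is already declared needs no key in C++ and a bare name cannot mismatch. A sketch with illustrative names:

// Hypothetical stand-in; bitmap_head_demo is not GCC's bitmap_head.
class bitmap_head_demo;

struct bitmap_obstack_demo
{
  bitmap_head_demo *heads;      // no 'class'/'struct' key needed at all
};
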
diff --git a/gcc/builtins.c b/gcc/builtins.c
index e2ba356c0d3..e5a9261e84c 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -1400,7 +1400,7 @@ expand_builtin_prefetch (tree exp)
if (targetm.have_prefetch ())
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_address_operand (&ops[0], op0);
create_integer_operand (&ops[1], INTVAL (op1));
@@ -2445,7 +2445,7 @@ expand_builtin_interclass_mathfn (tree exp, rtx target)
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
rtx_insn *last = get_last_insn ();
tree orig_arg = arg;
@@ -2946,7 +2946,7 @@ expand_builtin_strlen (tree exp, rtx target,
if (!validate_arglist (exp, POINTER_TYPE, VOID_TYPE))
return NULL_RTX;
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx pat;
tree len;
tree src = CALL_EXPR_ARG (exp, 0);
@@ -3923,7 +3923,7 @@ expand_builtin_mempcpy_args (tree dest, tree src, tree len,
static rtx
expand_movstr (tree dest, tree src, rtx target, memop_ret retmode)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx dest_mem;
rtx src_mem;
@@ -4633,7 +4633,7 @@ expand_cmpstr (insn_code icode, rtx target, rtx arg1_rtx, rtx arg2_rtx,
if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
target = NULL_RTX;
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, insn_mode);
create_fixed_operand (&ops[1], arg1_rtx);
create_fixed_operand (&ops[2], arg2_rtx);
@@ -5606,7 +5606,7 @@ expand_builtin___clear_cache (tree exp)
if (targetm.have_clear_cache ())
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
begin = CALL_EXPR_ARG (exp, 0);
begin_rtx = expand_expr (begin, NULL_RTX, Pmode, EXPAND_NORMAL);
@@ -6566,7 +6566,7 @@ expand_ifn_atomic_bit_test_and (gcall *call)
machine_mode mode = TYPE_MODE (TREE_TYPE (flag));
enum rtx_code code;
optab optab;
- struct expand_operand ops[5];
+ class expand_operand ops[5];
gcc_assert (flag_inline_atomics);
@@ -6874,7 +6874,7 @@ expand_builtin_thread_pointer (tree exp, rtx target)
icode = direct_optab_handler (get_thread_pointer_optab, Pmode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand op;
+ class expand_operand op;
/* If the target is not suitable then create a new target. */
if (target == NULL_RTX
|| !REG_P (target)
@@ -6897,7 +6897,7 @@ expand_builtin_set_thread_pointer (tree exp)
icode = direct_optab_handler (set_thread_pointer_optab, Pmode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand op;
+ class expand_operand op;
rtx val = expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX,
Pmode, EXPAND_NORMAL);
create_input_operand (&op, val, Pmode);
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 4d8f967b341..bbad47fc497 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,6 +1,13 @@
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
+ * c-opts.c (handle_deferred_opts): Change class-key of PODs to struct
+ and others to class.
+ * c-pretty-print.h: Same.
+
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
* c-format.c (check_argument_type): Change class-key from class to
struct and vice versa to match convention and avoid -Wclass-is-pod
and -Wstruct-no-pod.
diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c
index 188da437507..e97bbdf5c6f 100644
--- a/gcc/c-family/c-opts.c
+++ b/gcc/c-family/c-opts.c
@@ -1287,7 +1287,7 @@ handle_deferred_opts (void)
if (!deps_seen)
return;
- struct mkdeps *deps = cpp_get_deps (parse_in);
+ mkdeps *deps = cpp_get_deps (parse_in);
for (size_t i = 0; i < deferred_count; i++)
{
diff --git a/gcc/c-family/c-pretty-print.h b/gcc/c-family/c-pretty-print.h
index 8c516c3e56b..8d69620b724 100644
--- a/gcc/c-family/c-pretty-print.h
+++ b/gcc/c-family/c-pretty-print.h
@@ -36,7 +36,7 @@ enum pp_c_pretty_print_flags
/* The data type used to bundle information necessary for pretty-printing
a C or C++ entity. */
-struct c_pretty_printer;
+class c_pretty_printer;
/* The type of a C pretty-printer 'member' function. */
typedef void (*c_pretty_print_fn) (c_pretty_printer *, tree);
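
Here the first (forward) declaration of c_pretty_printer is what changes: it must use the same key as the definition, which per this change is a class. A self-contained sketch of that requirement, with hypothetical names in place of the real pretty-printer types:

// Hypothetical sketch; pretty_printer_demo is not GCC's c_pretty_printer.
class pretty_printer_demo;                 // forward declaration

typedef void (*print_fn_demo) (pretty_printer_demo *, int);

class pretty_printer_demo                  // definition uses the same key
{
public:
  void emit (int) { }
};

// Had the forward declaration said 'struct pretty_printer_demo;', the code
// would still be valid C++, but -Wmismatched-tags would flag the mismatch.
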
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index d2645a84dec..29836ff9595 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,6 +1,16 @@
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
+ * c-decl.c (xref_tag): Change class-key of PODs to struct and others
+ to class.
+ (field_decl_cmp): Same.
+ * c-parser.c (c_parser_struct_or_union_specifier): Same.
+ * c-tree.h: Same.
+ * gimple-parser.c (c_parser_gimple_compound_statement): Same.
+
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
* c-decl.c: Change class-key from class to struct and vice versa
to match convention and avoid -Wclass-is-pod and -Wstruct-no-pod.
* gimple-parser.c: Same.
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 35e15ba2a8b..d75648aa273 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -592,7 +592,7 @@ public:
/* Information for the struct or union currently being parsed, or
NULL if not parsing a struct or union. */
-static struct c_struct_parse_info *struct_parse_info;
+static class c_struct_parse_info *struct_parse_info;
/* Forward declarations. */
static tree lookup_name_in_scope (tree, struct c_scope *);
@@ -7768,7 +7768,7 @@ xref_tag (enum tree_code code, tree name)
tree
start_struct (location_t loc, enum tree_code code, tree name,
- struct c_struct_parse_info **enclosing_struct_parse_info)
+ class c_struct_parse_info **enclosing_struct_parse_info)
{
/* If there is already a tag defined at this scope
(as a forward reference), just return it. */
@@ -8183,7 +8183,7 @@ field_decl_cmp (const void *x_p, const void *y_p)
tree
finish_struct (location_t loc, tree t, tree fieldlist, tree attributes,
- struct c_struct_parse_info *enclosing_struct_parse_info)
+ class c_struct_parse_info *enclosing_struct_parse_info)
{
tree x;
bool toplevel = file_scope == current_scope;
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 98508721ed9..3fa7e682b8f 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -3145,7 +3145,7 @@ c_parser_struct_or_union_specifier (c_parser *parser)
{
/* Parse a struct or union definition. Start the scope of the
tag before parsing components. */
- struct c_struct_parse_info *struct_info;
+ class c_struct_parse_info *struct_info;
tree type = start_struct (struct_loc, code, ident, &struct_info);
tree postfix_attrs;
/* We chain the components in reverse order, then put them in
diff --git a/gcc/c/c-tree.h b/gcc/c/c-tree.h
index 346f46a5207..dae2979d482 100644
--- a/gcc/c/c-tree.h
+++ b/gcc/c/c-tree.h
@@ -525,7 +525,7 @@ extern void gen_aux_info_record (tree, int, int, int);
/* in c-decl.c */
struct c_spot_bindings;
-struct c_struct_parse_info;
+class c_struct_parse_info;
extern struct obstack parser_obstack;
extern tree c_break_label;
extern tree c_cont_label;
@@ -562,7 +562,7 @@ extern void finish_decl (tree, location_t, tree, tree, tree);
extern tree finish_enum (tree, tree, tree);
extern void finish_function (void);
extern tree finish_struct (location_t, tree, tree, tree,
- struct c_struct_parse_info *);
+ class c_struct_parse_info *);
extern struct c_arg_info *build_arg_info (void);
extern struct c_arg_info *get_parm_info (bool, tree);
extern tree grokfield (location_t, struct c_declarator *,
@@ -586,7 +586,7 @@ extern bool start_function (struct c_declspecs *, struct c_declarator *, tree);
extern tree start_decl (struct c_declarator *, struct c_declspecs *, bool,
tree);
extern tree start_struct (location_t, enum tree_code, tree,
- struct c_struct_parse_info **);
+ class c_struct_parse_info **);
extern void store_parm_decls (void);
extern void store_parm_decls_from (struct c_arg_info *);
extern void temp_store_parm_decls (tree, tree);
diff --git a/gcc/c/gimple-parser.c b/gcc/c/gimple-parser.c
index e388d93b9e2..9a65394a020 100644
--- a/gcc/c/gimple-parser.c
+++ b/gcc/c/gimple-parser.c
@@ -585,7 +585,7 @@ c_parser_gimple_compound_statement (gimple_parser &parser, gimple_seq *seq)
profile_probability::always ());
/* We leave the proper setting to fixup. */
- struct loop *loop_father = loops_for_fn (cfun)->tree_root;
+ class loop *loop_father = loops_for_fn (cfun)->tree_root;
/* If the new block is a loop header, allocate a loop
struct. Fixup will take care of proper placement within
the loop tree. */
@@ -598,7 +598,7 @@ c_parser_gimple_compound_statement (gimple_parser &parser, gimple_seq *seq)
}
else
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->num = is_loop_header_of;
loop->header = bb;
vec_safe_grow_cleared (loops_for_fn (cfun)->larray,
diff --git a/gcc/caller-save.c b/gcc/caller-save.c
index 9ff470c33d4..7c1de894976 100644
--- a/gcc/caller-save.c
+++ b/gcc/caller-save.c
@@ -88,11 +88,11 @@ static void mark_set_regs (rtx, const_rtx, void *);
static void mark_referenced_regs (rtx *, refmarker_fn *mark, void *mark_arg);
static refmarker_fn mark_reg_as_referenced;
static refmarker_fn replace_reg_with_saved_mem;
-static int insert_save (struct insn_chain *, int, HARD_REG_SET *,
+static int insert_save (class insn_chain *, int, HARD_REG_SET *,
machine_mode *);
-static int insert_restore (struct insn_chain *, int, int, int,
+static int insert_restore (class insn_chain *, int, int, int,
machine_mode *);
-static struct insn_chain *insert_one_insn (struct insn_chain *, int, int,
+static class insn_chain *insert_one_insn (class insn_chain *, int, int,
rtx);
static void add_stored_regs (rtx, const_rtx, void *);
@@ -419,7 +419,7 @@ setup_save_areas (void)
HARD_REG_SET hard_regs_used;
struct saved_hard_reg *saved_reg;
rtx_insn *insn;
- struct insn_chain *chain, *next;
+ class insn_chain *chain, *next;
unsigned int regno;
HARD_REG_SET hard_regs_to_save, used_regs, this_insn_sets;
reg_set_iterator rsi;
@@ -744,7 +744,7 @@ setup_save_areas (void)
void
save_call_clobbered_regs (void)
{
- struct insn_chain *chain, *next, *last = NULL;
+ class insn_chain *chain, *next, *last = NULL;
machine_mode save_mode [FIRST_PSEUDO_REGISTER];
/* Computed in mark_set_regs, holds all registers set by the current
@@ -1174,14 +1174,14 @@ replace_reg_with_saved_mem (rtx *loc,
Return the extra number of registers saved. */
static int
-insert_restore (struct insn_chain *chain, int before_p, int regno,
+insert_restore (class insn_chain *chain, int before_p, int regno,
int maxrestore, machine_mode *save_mode)
{
int i, k;
rtx pat = NULL_RTX;
int code;
unsigned int numregs = 0;
- struct insn_chain *new_chain;
+ class insn_chain *new_chain;
rtx mem;
/* A common failure mode if register status is not correct in the
@@ -1253,7 +1253,7 @@ insert_restore (struct insn_chain *chain, int before_p, int regno,
/* Like insert_restore above, but save registers instead. */
static int
-insert_save (struct insn_chain *chain, int regno,
+insert_save (class insn_chain *chain, int regno,
HARD_REG_SET *to_save, machine_mode *save_mode)
{
int i;
@@ -1261,7 +1261,7 @@ insert_save (struct insn_chain *chain, int regno,
rtx pat = NULL_RTX;
int code;
unsigned int numregs = 0;
- struct insn_chain *new_chain;
+ class insn_chain *new_chain;
rtx mem;
/* A common failure mode if register status is not correct in the
@@ -1351,11 +1351,11 @@ add_used_regs (rtx *loc, void *data)
}
/* Emit a new caller-save insn and set the code. */
-static struct insn_chain *
-insert_one_insn (struct insn_chain *chain, int before_p, int code, rtx pat)
+static class insn_chain *
+insert_one_insn (class insn_chain *chain, int before_p, int code, rtx pat)
{
rtx_insn *insn = chain->insn;
- struct insn_chain *new_chain;
+ class insn_chain *new_chain;
/* If INSN references CC0, put our insns in front of the insn that sets
CC0. This is always safe, since the only way we could be passed an
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 983115ee40a..4757bab1dee 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -1145,7 +1145,7 @@ get_bb_copy (basic_block bb)
initialized so passes not needing this don't need to care. */
void
-set_loop_copy (struct loop *loop, struct loop *copy)
+set_loop_copy (class loop *loop, class loop *copy)
{
if (!copy)
copy_original_table_clear (loop_copy, loop->num);
@@ -1155,8 +1155,8 @@ set_loop_copy (struct loop *loop, struct loop *copy)
/* Get the copy of LOOP. */
-struct loop *
-get_loop_copy (struct loop *loop)
+class loop *
+get_loop_copy (class loop *loop)
{
struct htab_bb_copy_original_entry *entry;
struct htab_bb_copy_original_entry key;
diff --git a/gcc/cfg.h b/gcc/cfg.h
index b6f95bd21fe..12cd760899e 100644
--- a/gcc/cfg.h
+++ b/gcc/cfg.h
@@ -122,8 +122,8 @@ extern void set_bb_original (basic_block, basic_block);
extern basic_block get_bb_original (basic_block);
extern void set_bb_copy (basic_block, basic_block);
extern basic_block get_bb_copy (basic_block);
-void set_loop_copy (struct loop *, struct loop *);
-struct loop *get_loop_copy (struct loop *);
+void set_loop_copy (class loop *, class loop *);
+class loop *get_loop_copy (class loop *);
/* Generic RAII class to allocate a bit from storage of integer type T.
The allocated bit is accessible as mask with the single bit set
diff --git a/gcc/cfganal.h b/gcc/cfganal.h
index ba889e2f563..c928feae625 100644
--- a/gcc/cfganal.h
+++ b/gcc/cfganal.h
@@ -72,8 +72,8 @@ extern int rev_post_order_and_mark_dfs_back_seme (struct function *, edge,
extern int dfs_enumerate_from (basic_block, int,
bool (*)(const_basic_block, const void *),
basic_block *, int, const void *);
-extern void compute_dominance_frontiers (struct bitmap_head *);
-extern bitmap compute_idf (bitmap, struct bitmap_head *);
+extern void compute_dominance_frontiers (class bitmap_head *);
+extern bitmap compute_idf (bitmap, class bitmap_head *);
extern void bitmap_intersection_of_succs (sbitmap, sbitmap *, basic_block);
extern void bitmap_intersection_of_preds (sbitmap, sbitmap *, basic_block);
extern void bitmap_union_of_succs (sbitmap, sbitmap *, basic_block);
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 0128fca4e2b..d0c1d31a2cc 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -332,7 +332,7 @@ public:
#define EOC ((size_t)-1)
/* We have an array of such objects while deciding allocation. */
-static struct stack_var *stack_vars;
+static class stack_var *stack_vars;
static size_t stack_vars_alloc;
static size_t stack_vars_num;
static hash_map<tree, size_t> *decl_to_stack_part;
@@ -426,7 +426,7 @@ alloc_stack_frame_space (poly_int64 size, unsigned HOST_WIDE_INT align)
static void
add_stack_var (tree decl, bool really_expand)
{
- struct stack_var *v;
+ class stack_var *v;
if (stack_vars_num >= stack_vars_alloc)
{
@@ -435,7 +435,7 @@ add_stack_var (tree decl, bool really_expand)
else
stack_vars_alloc = 32;
stack_vars
- = XRESIZEVEC (struct stack_var, stack_vars, stack_vars_alloc);
+ = XRESIZEVEC (class stack_var, stack_vars, stack_vars_alloc);
}
if (!decl_to_stack_part)
decl_to_stack_part = new hash_map<tree, size_t>;
@@ -474,8 +474,8 @@ add_stack_var (tree decl, bool really_expand)
static void
add_stack_var_conflict (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return;
if (!a->conflicts)
@@ -491,8 +491,8 @@ add_stack_var_conflict (size_t x, size_t y)
static bool
stack_var_conflict_p (size_t x, size_t y)
{
- struct stack_var *a = &stack_vars[x];
- struct stack_var *b = &stack_vars[y];
+ class stack_var *a = &stack_vars[x];
+ class stack_var *b = &stack_vars[y];
if (x == y)
return false;
/* Partitions containing an SSA name result from gimple registers
@@ -607,7 +607,7 @@ add_scope_conflicts_1 (basic_block bb, bitmap work, bool for_conflict)
unsigned i;
EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi)
{
- struct stack_var *a = &stack_vars[i];
+ class stack_var *a = &stack_vars[i];
if (!a->conflicts)
a->conflicts = BITMAP_ALLOC (&stack_var_bitmap_obstack);
bitmap_ior_into (a->conflicts, work);
@@ -853,7 +853,7 @@ update_alias_info_with_stack_vars (void)
static void
union_stack_vars (size_t a, size_t b)
{
- struct stack_var *vb = &stack_vars[b];
+ class stack_var *vb = &stack_vars[b];
bitmap_iterator bi;
unsigned u;
@@ -1045,7 +1045,7 @@ public:
with that location. */
static void
-expand_stack_vars (bool (*pred) (size_t), struct stack_vars_data *data)
+expand_stack_vars (bool (*pred) (size_t), class stack_vars_data *data)
{
size_t si, i, j, n = stack_vars_num;
poly_uint64 large_size = 0, large_alloc = 0;
@@ -2232,7 +2232,7 @@ expand_used_vars (void)
/* Assign rtl to each variable based on these partitions. */
if (stack_vars_num > 0)
{
- struct stack_vars_data data;
+ class stack_vars_data data;
data.asan_base = NULL_RTX;
data.asan_alignb = 0;
diff --git a/gcc/cfghooks.c b/gcc/cfghooks.c
index 76183ecf6ae..7c00fc370f8 100644
--- a/gcc/cfghooks.c
+++ b/gcc/cfghooks.c
@@ -496,7 +496,7 @@ redirect_edge_and_branch_force (edge e, basic_block dest)
{
if (ret != NULL)
{
- struct loop *loop
+ class loop *loop
= find_common_loop (single_pred (ret)->loop_father,
single_succ (ret)->loop_father);
add_bb_to_loop (ret, loop);
@@ -604,7 +604,7 @@ delete_basic_block (basic_block bb)
if (current_loops != NULL)
{
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
/* If we remove the header or the latch of a loop, mark the loop for
removal. */
@@ -640,7 +640,7 @@ split_edge (edge e)
profile_count count = e->count ();
edge f;
bool irr = (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
- struct loop *loop;
+ class loop *loop;
basic_block src = e->src, dest = e->dest;
if (!cfg_hooks->split_edge)
@@ -870,7 +870,7 @@ make_forwarder_block (basic_block bb, bool (*redirect_edge_p) (edge),
edge e, fallthru;
edge_iterator ei;
basic_block dummy, jump;
- struct loop *loop, *ploop, *cloop;
+ class loop *loop, *ploop, *cloop;
if (!cfg_hooks->make_forwarder_block)
internal_error ("%s does not support make_forwarder_block",
@@ -1035,7 +1035,7 @@ force_nonfallthru (edge e)
{
basic_block pred = single_pred (ret);
basic_block succ = single_succ (ret);
- struct loop *loop
+ class loop *loop
= find_common_loop (pred->loop_father, succ->loop_father);
rescan_loop_exit (e, false, true);
add_bb_to_loop (ret, loop);
@@ -1118,8 +1118,8 @@ duplicate_block (basic_block bb, edge e, basic_block after, copy_bb_data *id)
of BB if the loop is not being copied. */
if (current_loops != NULL)
{
- struct loop *cloop = bb->loop_father;
- struct loop *copy = get_loop_copy (cloop);
+ class loop *cloop = bb->loop_father;
+ class loop *copy = get_loop_copy (cloop);
/* If we copied the loop header block but not the loop
we have created a loop with multiple entries. Ditch the loop,
add the new block to the outer loop and arrange for a fixup. */
@@ -1228,7 +1228,7 @@ lv_flush_pending_stmts (edge e)
a need to call the tree_duplicate_loop_to_header_edge rather
than duplicate_loop_to_header_edge when we are in tree mode. */
bool
-cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge e,
+cfg_hook_duplicate_loop_to_header_edge (class loop *loop, edge e,
unsigned int ndupl,
sbitmap wont_exit, edge orig,
vec<edge> *to_remove,
@@ -1336,7 +1336,7 @@ end:
void
copy_bbs (basic_block *bbs, unsigned n, basic_block *new_bbs,
edge *edges, unsigned num_edges, edge *new_edges,
- struct loop *base, basic_block after, bool update_dominance)
+ class loop *base, basic_block after, bool update_dominance)
{
unsigned i, j;
basic_block bb, new_bb, dom_bb;
diff --git a/gcc/cfghooks.h b/gcc/cfghooks.h
index 9ed0c363158..627eff9e186 100644
--- a/gcc/cfghooks.h
+++ b/gcc/cfghooks.h
@@ -166,7 +166,7 @@ struct cfg_hooks
/* A hook for duplicating loop in CFG, currently this is used
in loop versioning. */
- bool (*cfg_hook_duplicate_loop_to_header_edge) (struct loop *, edge,
+ bool (*cfg_hook_duplicate_loop_to_header_edge) (class loop *, edge,
unsigned, sbitmap,
edge, vec<edge> *,
int);
@@ -250,7 +250,7 @@ extern bool block_ends_with_condjump_p (const_basic_block bb);
extern int flow_call_edges_add (sbitmap);
extern void execute_on_growing_pred (edge);
extern void execute_on_shrinking_pred (edge);
-extern bool cfg_hook_duplicate_loop_to_header_edge (struct loop *loop, edge,
+extern bool cfg_hook_duplicate_loop_to_header_edge (class loop *loop, edge,
unsigned int ndupl,
sbitmap wont_exit,
edge orig,
@@ -266,7 +266,7 @@ extern void lv_add_condition_to_bb (basic_block, basic_block, basic_block,
extern bool can_copy_bbs_p (basic_block *, unsigned);
extern void copy_bbs (basic_block *, unsigned, basic_block *,
- edge *, unsigned, edge *, struct loop *,
+ edge *, unsigned, edge *, class loop *,
basic_block, bool);
void profile_record_check_consistency (profile_record *);
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index f64326b944e..4ad1f658708 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -59,7 +59,7 @@ flow_loops_cfg_dump (FILE *file)
/* Return nonzero if the nodes of LOOP are a subset of OUTER. */
bool
-flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
+flow_loop_nested_p (const class loop *outer, const class loop *loop)
{
unsigned odepth = loop_depth (outer);
@@ -70,8 +70,8 @@ flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
/* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
loops within LOOP. */
-struct loop *
-superloop_at_depth (struct loop *loop, unsigned depth)
+class loop *
+superloop_at_depth (class loop *loop, unsigned depth)
{
unsigned ldepth = loop_depth (loop);
@@ -86,7 +86,7 @@ superloop_at_depth (struct loop *loop, unsigned depth)
/* Returns the list of the latch edges of LOOP. */
static vec<edge>
-get_loop_latch_edges (const struct loop *loop)
+get_loop_latch_edges (const class loop *loop)
{
edge_iterator ei;
edge e;
@@ -105,8 +105,8 @@ get_loop_latch_edges (const struct loop *loop)
using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
void
-flow_loop_dump (const struct loop *loop, FILE *file,
- void (*loop_dump_aux) (const struct loop *, FILE *, int),
+flow_loop_dump (const class loop *loop, FILE *file,
+ void (*loop_dump_aux) (const class loop *, FILE *, int),
int verbose)
{
basic_block *bbs;
@@ -160,9 +160,9 @@ flow_loop_dump (const struct loop *loop, FILE *file,
using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
void
-flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
+flow_loops_dump (FILE *file, void (*loop_dump_aux) (const class loop *, FILE *, int), int verbose)
{
- struct loop *loop;
+ class loop *loop;
if (!current_loops || ! file)
return;
@@ -181,7 +181,7 @@ flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *,
/* Free data allocated for LOOP. */
void
-flow_loop_free (struct loop *loop)
+flow_loop_free (class loop *loop)
{
struct loop_exit *exit, *next;
@@ -229,7 +229,7 @@ flow_loops_free (struct loops *loops)
Return the number of nodes within the loop. */
int
-flow_loop_nodes_find (basic_block header, struct loop *loop)
+flow_loop_nodes_find (basic_block header, class loop *loop)
{
vec<basic_block> stack = vNULL;
int num_nodes = 1;
@@ -278,7 +278,7 @@ flow_loop_nodes_find (basic_block header, struct loop *loop)
superloop is FATHER. */
static void
-establish_preds (struct loop *loop, struct loop *father)
+establish_preds (class loop *loop, class loop *father)
{
loop_p ploop;
unsigned depth = loop_depth (father) + 1;
@@ -302,8 +302,8 @@ establish_preds (struct loop *loop, struct loop *father)
of FATHERs siblings. */
void
-flow_loop_tree_node_add (struct loop *father, struct loop *loop,
- struct loop *after)
+flow_loop_tree_node_add (class loop *father, class loop *loop,
+ class loop *after)
{
if (after)
{
@@ -322,9 +322,9 @@ flow_loop_tree_node_add (struct loop *father, struct loop *loop,
/* Remove LOOP from the loop hierarchy tree. */
void
-flow_loop_tree_node_remove (struct loop *loop)
+flow_loop_tree_node_remove (class loop *loop)
{
- struct loop *prev, *father;
+ class loop *prev, *father;
father = loop_outer (loop);
@@ -343,10 +343,10 @@ flow_loop_tree_node_remove (struct loop *loop)
/* Allocates and returns new loop structure. */
-struct loop *
+class loop *
alloc_loop (void)
{
- struct loop *loop = ggc_cleared_alloc<struct loop> ();
+ class loop *loop = ggc_cleared_alloc<class loop> ();
loop->exits = ggc_cleared_alloc<loop_exit> ();
loop->exits->next = loop->exits->prev = loop->exits;
@@ -365,7 +365,7 @@ void
init_loops_structure (struct function *fn,
struct loops *loops, unsigned num_loops)
{
- struct loop *root;
+ class loop *root;
memset (loops, 0, sizeof *loops);
vec_alloc (loops->larray, num_loops);
@@ -460,7 +460,7 @@ flow_loops_find (struct loops *loops)
basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
if (bb_loop_header_p (header))
{
- struct loop *loop;
+ class loop *loop;
/* The current active loop tree has valid loop-fathers for
header blocks. */
@@ -503,7 +503,7 @@ flow_loops_find (struct loops *loops)
and assign basic-block ownership. */
for (i = 0; i < larray.length (); ++i)
{
- struct loop *loop = larray[i];
+ class loop *loop = larray[i];
basic_block header = loop->header;
edge_iterator ei;
edge e;
@@ -539,8 +539,8 @@ static int *sort_sibling_loops_cmp_rpo;
static int
sort_sibling_loops_cmp (const void *la_, const void *lb_)
{
- const struct loop *la = *(const struct loop * const *)la_;
- const struct loop *lb = *(const struct loop * const *)lb_;
+ const class loop *la = *(const class loop * const *)la_;
+ const class loop *lb = *(const class loop * const *)lb_;
return (sort_sibling_loops_cmp_rpo[la->header->index]
- sort_sibling_loops_cmp_rpo[lb->header->index]);
}
@@ -643,7 +643,7 @@ find_subloop_latch_edge_by_profile (vec<edge> latches)
another edge. */
static edge
-find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
+find_subloop_latch_edge_by_ivs (class loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
{
edge e, latch = latches[0];
unsigned i;
@@ -695,7 +695,7 @@ find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> la
returns NULL. */
static edge
-find_subloop_latch_edge (struct loop *loop)
+find_subloop_latch_edge (class loop *loop)
{
vec<edge> latches = get_loop_latch_edges (loop);
edge latch = NULL;
@@ -729,11 +729,11 @@ mfb_redirect_edges_in_set (edge e)
/* Creates a subloop of LOOP with latch edge LATCH. */
static void
-form_subloop (struct loop *loop, edge latch)
+form_subloop (class loop *loop, edge latch)
{
edge_iterator ei;
edge e, new_entry;
- struct loop *new_loop;
+ class loop *new_loop;
mfb_reis_set = new hash_set<edge>;
FOR_EACH_EDGE (e, ei, loop->header->preds)
@@ -759,7 +759,7 @@ form_subloop (struct loop *loop, edge latch)
a new latch of LOOP. */
static void
-merge_latch_edges (struct loop *loop)
+merge_latch_edges (class loop *loop)
{
vec<edge> latches = get_loop_latch_edges (loop);
edge latch, e;
@@ -792,7 +792,7 @@ merge_latch_edges (struct loop *loop)
loops with single latch edge. */
static void
-disambiguate_multiple_latches (struct loop *loop)
+disambiguate_multiple_latches (class loop *loop)
{
edge e;
@@ -836,7 +836,7 @@ disambiguate_multiple_latches (struct loop *loop)
void
disambiguate_loops_with_multiple_latches (void)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
{
@@ -847,9 +847,9 @@ disambiguate_loops_with_multiple_latches (void)
/* Return nonzero if basic block BB belongs to LOOP. */
bool
-flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
+flow_bb_inside_loop_p (const class loop *loop, const_basic_block bb)
{
- struct loop *source_loop;
+ class loop *source_loop;
if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
|| bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
@@ -863,7 +863,7 @@ flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
static bool
glb_enum_p (const_basic_block bb, const void *glb_loop)
{
- const struct loop *const loop = (const struct loop *) glb_loop;
+ const class loop *const loop = (const class loop *) glb_loop;
return (bb != loop->header
&& dominated_by_p (CDI_DOMINATORS, bb, loop->header));
}
@@ -876,7 +876,7 @@ glb_enum_p (const_basic_block bb, const void *glb_loop)
returned. */
unsigned
-get_loop_body_with_size (const struct loop *loop, basic_block *body,
+get_loop_body_with_size (const class loop *loop, basic_block *body,
unsigned max_size)
{
return dfs_enumerate_from (loop->header, 1, glb_enum_p,
@@ -888,7 +888,7 @@ get_loop_body_with_size (const struct loop *loop, basic_block *body,
header != latch, latch is the 1-st block. */
basic_block *
-get_loop_body (const struct loop *loop)
+get_loop_body (const class loop *loop)
{
basic_block *body, bb;
unsigned tv = 0;
@@ -918,7 +918,7 @@ get_loop_body (const struct loop *loop)
array TOVISIT from index *TV. */
static void
-fill_sons_in_loop (const struct loop *loop, basic_block bb,
+fill_sons_in_loop (const class loop *loop, basic_block bb,
basic_block *tovisit, int *tv)
{
basic_block son, postpone = NULL;
@@ -948,7 +948,7 @@ fill_sons_in_loop (const struct loop *loop, basic_block bb,
the latch, then only blocks dominated by s are be after it. */
basic_block *
-get_loop_body_in_dom_order (const struct loop *loop)
+get_loop_body_in_dom_order (const class loop *loop)
{
basic_block *tovisit;
int tv;
@@ -970,7 +970,7 @@ get_loop_body_in_dom_order (const struct loop *loop)
/* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
basic_block *
-get_loop_body_in_custom_order (const struct loop *loop,
+get_loop_body_in_custom_order (const class loop *loop,
int (*bb_comparator) (const void *, const void *))
{
basic_block *bbs = get_loop_body (loop);
@@ -983,7 +983,7 @@ get_loop_body_in_custom_order (const struct loop *loop,
/* Get body of a LOOP in breadth first sort order. */
basic_block *
-get_loop_body_in_bfs_order (const struct loop *loop)
+get_loop_body_in_bfs_order (const class loop *loop)
{
basic_block *blocks;
basic_block bb;
@@ -1069,7 +1069,7 @@ void
rescan_loop_exit (edge e, bool new_edge, bool removed)
{
struct loop_exit *exits = NULL, *exit;
- struct loop *aloop, *cloop;
+ class loop *aloop, *cloop;
if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
return;
@@ -1190,7 +1190,7 @@ release_recorded_exits (function *fn)
/* Returns the list of the exit edges of a LOOP. */
vec<edge>
-get_loop_exit_edges (const struct loop *loop)
+get_loop_exit_edges (const class loop *loop)
{
vec<edge> edges = vNULL;
edge e;
@@ -1226,7 +1226,7 @@ get_loop_exit_edges (const struct loop *loop)
/* Counts the number of conditional branches inside LOOP. */
unsigned
-num_loop_branches (const struct loop *loop)
+num_loop_branches (const class loop *loop)
{
unsigned i, n;
basic_block * body;
@@ -1245,7 +1245,7 @@ num_loop_branches (const struct loop *loop)
/* Adds basic block BB to LOOP. */
void
-add_bb_to_loop (basic_block bb, struct loop *loop)
+add_bb_to_loop (basic_block bb, class loop *loop)
{
unsigned i;
loop_p ploop;
@@ -1273,7 +1273,7 @@ void
remove_bb_from_loops (basic_block bb)
{
unsigned i;
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
loop_p ploop;
edge_iterator ei;
edge e;
@@ -1295,8 +1295,8 @@ remove_bb_from_loops (basic_block bb)
}
/* Finds nearest common ancestor in loop tree for given loops. */
-struct loop *
-find_common_loop (struct loop *loop_s, struct loop *loop_d)
+class loop *
+find_common_loop (class loop *loop_s, class loop *loop_d)
{
unsigned sdepth, ddepth;
@@ -1322,7 +1322,7 @@ find_common_loop (struct loop *loop_s, struct loop *loop_d)
/* Removes LOOP from structures and frees its data. */
void
-delete_loop (struct loop *loop)
+delete_loop (class loop *loop)
{
/* Remove the loop from structure. */
flow_loop_tree_node_remove (loop);
@@ -1337,11 +1337,11 @@ delete_loop (struct loop *loop)
/* Cancels the LOOP; it must be innermost one. */
static void
-cancel_loop (struct loop *loop)
+cancel_loop (class loop *loop)
{
basic_block *bbs;
unsigned i;
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
gcc_assert (!loop->inner);
@@ -1356,7 +1356,7 @@ cancel_loop (struct loop *loop)
/* Cancels LOOP and all its subloops. */
void
-cancel_loop_tree (struct loop *loop)
+cancel_loop_tree (class loop *loop)
{
while (loop->inner)
cancel_loop_tree (loop->inner);
@@ -1385,7 +1385,7 @@ verify_loop_structure (void)
{
unsigned *sizes, i, j;
basic_block bb, *bbs;
- struct loop *loop;
+ class loop *loop;
int err = 0;
edge e;
unsigned num = number_of_loops (cfun);
@@ -1727,14 +1727,14 @@ verify_loop_structure (void)
/* Returns latch edge of LOOP. */
edge
-loop_latch_edge (const struct loop *loop)
+loop_latch_edge (const class loop *loop)
{
return find_edge (loop->latch, loop->header);
}
/* Returns preheader edge of LOOP. */
edge
-loop_preheader_edge (const struct loop *loop)
+loop_preheader_edge (const class loop *loop)
{
edge e;
edge_iterator ei;
@@ -1758,7 +1758,7 @@ loop_preheader_edge (const struct loop *loop)
/* Returns true if E is an exit of LOOP. */
bool
-loop_exit_edge_p (const struct loop *loop, const_edge e)
+loop_exit_edge_p (const class loop *loop, const_edge e)
{
return (flow_bb_inside_loop_p (loop, e->src)
&& !flow_bb_inside_loop_p (loop, e->dest));
@@ -1769,7 +1769,7 @@ loop_exit_edge_p (const struct loop *loop, const_edge e)
is returned always. */
edge
-single_exit (const struct loop *loop)
+single_exit (const class loop *loop)
{
struct loop_exit *exit = loop->exits->next;
@@ -1785,7 +1785,7 @@ single_exit (const struct loop *loop)
/* Returns true when BB has an incoming edge exiting LOOP. */
bool
-loop_exits_to_bb_p (struct loop *loop, basic_block bb)
+loop_exits_to_bb_p (class loop *loop, basic_block bb)
{
edge e;
edge_iterator ei;
@@ -1800,7 +1800,7 @@ loop_exits_to_bb_p (struct loop *loop, basic_block bb)
/* Returns true when BB has an outgoing edge exiting LOOP. */
bool
-loop_exits_from_bb_p (struct loop *loop, basic_block bb)
+loop_exits_from_bb_p (class loop *loop, basic_block bb)
{
edge e;
edge_iterator ei;
@@ -1815,10 +1815,10 @@ loop_exits_from_bb_p (struct loop *loop, basic_block bb)
/* Return location corresponding to the loop control condition if possible. */
dump_user_location_t
-get_loop_location (struct loop *loop)
+get_loop_location (class loop *loop)
{
rtx_insn *insn = NULL;
- struct niter_desc *desc = NULL;
+ class niter_desc *desc = NULL;
edge exit;
/* For a for or while loop, we would like to return the location
@@ -1869,7 +1869,7 @@ get_loop_location (struct loop *loop)
I_BOUND times. */
void
-record_niter_bound (struct loop *loop, const widest_int &i_bound,
+record_niter_bound (class loop *loop, const widest_int &i_bound,
bool realistic, bool upper)
{
/* Update the bounds only when there is no previous estimation, or when the
@@ -1920,7 +1920,7 @@ record_niter_bound (struct loop *loop, const widest_int &i_bound,
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_estimated_loop_iterations_int (struct loop *loop)
+get_estimated_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
@@ -1940,7 +1940,7 @@ get_estimated_loop_iterations_int (struct loop *loop)
the number of execution of the latch by one. */
HOST_WIDE_INT
-max_stmt_executions_int (struct loop *loop)
+max_stmt_executions_int (class loop *loop)
{
HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
HOST_WIDE_INT snit;
@@ -1959,7 +1959,7 @@ max_stmt_executions_int (struct loop *loop)
the number of execution of the latch by one. */
HOST_WIDE_INT
-likely_max_stmt_executions_int (struct loop *loop)
+likely_max_stmt_executions_int (class loop *loop)
{
HOST_WIDE_INT nit = get_likely_max_loop_iterations_int (loop);
HOST_WIDE_INT snit;
@@ -1978,7 +1978,7 @@ likely_max_stmt_executions_int (struct loop *loop)
returns true. */
bool
-get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
+get_estimated_loop_iterations (class loop *loop, widest_int *nit)
{
/* Even if the bound is not recorded, possibly we can derrive one from
profile. */
@@ -2002,7 +2002,7 @@ get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
false, otherwise returns true. */
bool
-get_max_loop_iterations (const struct loop *loop, widest_int *nit)
+get_max_loop_iterations (const class loop *loop, widest_int *nit)
{
if (!loop->any_upper_bound)
return false;
@@ -2016,7 +2016,7 @@ get_max_loop_iterations (const struct loop *loop, widest_int *nit)
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_max_loop_iterations_int (const struct loop *loop)
+get_max_loop_iterations_int (const class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
@@ -2036,7 +2036,7 @@ get_max_loop_iterations_int (const struct loop *loop)
false, otherwise returns true. */
bool
-get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
+get_likely_max_loop_iterations (class loop *loop, widest_int *nit)
{
if (!loop->any_likely_upper_bound)
return false;
@@ -2050,7 +2050,7 @@ get_likely_max_loop_iterations (struct loop *loop, widest_int *nit)
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-get_likely_max_loop_iterations_int (struct loop *loop)
+get_likely_max_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
index 94140702055..0b0154ffd7b 100644
--- a/gcc/cfgloop.h
+++ b/gcc/cfgloop.h
@@ -66,7 +66,7 @@ public:
bool is_exit;
/* The next bound in the list. */
- struct nb_iter_bound *next;
+ class nb_iter_bound *next;
};
/* Description of the loop exit. */
@@ -92,7 +92,7 @@ struct loop_exit_hasher : ggc_ptr_hash<loop_exit>
static void remove (loop_exit *);
};
-typedef struct loop *loop_p;
+typedef class loop *loop_p;
/* An integer estimation of the number of iterations. Estimate_state
describes what is the state of the estimation. */
@@ -142,10 +142,10 @@ public:
vec<loop_p, va_gc> *superloops;
/* The first inner (child) loop or NULL if innermost loop. */
- struct loop *inner;
+ class loop *inner;
/* Link to the next (sibling) loop. */
- struct loop *next;
+ class loop *next;
/* Auxiliary info specific to a pass. */
PTR GTY ((skip (""))) aux;
@@ -252,7 +252,7 @@ public:
int orig_loop_num;
/* Upper bound on number of iterations of a loop. */
- struct nb_iter_bound *bounds;
+ class nb_iter_bound *bounds;
/* Non-overflow control ivs of a loop. */
struct control_iv *control_ivs;
@@ -261,7 +261,7 @@ public:
struct loop_exit *exits;
/* Number of iteration analysis data for RTL. */
- struct niter_desc *simple_loop_desc;
+ class niter_desc *simple_loop_desc;
/* For sanity checking during loop fixup we record here the former
loop header for loops marked for removal. Note that this prevents
@@ -277,21 +277,21 @@ public:
/* Set C to the LOOP constraint. */
static inline void
-loop_constraint_set (struct loop *loop, unsigned c)
+loop_constraint_set (class loop *loop, unsigned c)
{
loop->constraints |= c;
}
/* Clear C from the LOOP constraint. */
static inline void
-loop_constraint_clear (struct loop *loop, unsigned c)
+loop_constraint_clear (class loop *loop, unsigned c)
{
loop->constraints &= ~c;
}
/* Check if C is set in the LOOP constraint. */
static inline bool
-loop_constraint_set_p (struct loop *loop, unsigned c)
+loop_constraint_set_p (class loop *loop, unsigned c)
{
return (loop->constraints & c) == c;
}
@@ -327,7 +327,7 @@ struct GTY (()) loops {
hash_table<loop_exit_hasher> *GTY(()) exits;
/* Pointer to root of loop hierarchy tree. */
- struct loop *tree_root;
+ class loop *tree_root;
};
/* Loop recognition. */
@@ -337,12 +337,12 @@ extern struct loops *flow_loops_find (struct loops *);
extern void disambiguate_loops_with_multiple_latches (void);
extern void flow_loops_free (struct loops *);
extern void flow_loops_dump (FILE *,
- void (*)(const struct loop *, FILE *, int), int);
-extern void flow_loop_dump (const struct loop *, FILE *,
- void (*)(const struct loop *, FILE *, int), int);
-struct loop *alloc_loop (void);
-extern void flow_loop_free (struct loop *);
-int flow_loop_nodes_find (basic_block, struct loop *);
+ void (*)(const class loop *, FILE *, int), int);
+extern void flow_loop_dump (const class loop *, FILE *,
+ void (*)(const class loop *, FILE *, int), int);
+class loop *alloc_loop (void);
+extern void flow_loop_free (class loop *);
+int flow_loop_nodes_find (basic_block, class loop *);
unsigned fix_loop_structure (bitmap changed_bbs);
bool mark_irreducible_loops (void);
void release_recorded_exits (function *);
@@ -351,54 +351,54 @@ void rescan_loop_exit (edge, bool, bool);
void sort_sibling_loops (function *);
/* Loop data structure manipulation/querying. */
-extern void flow_loop_tree_node_add (struct loop *, struct loop *,
- struct loop * = NULL);
-extern void flow_loop_tree_node_remove (struct loop *);
-extern bool flow_loop_nested_p (const struct loop *, const struct loop *);
-extern bool flow_bb_inside_loop_p (const struct loop *, const_basic_block);
-extern struct loop * find_common_loop (struct loop *, struct loop *);
-struct loop *superloop_at_depth (struct loop *, unsigned);
+extern void flow_loop_tree_node_add (class loop *, class loop *,
+ class loop * = NULL);
+extern void flow_loop_tree_node_remove (class loop *);
+extern bool flow_loop_nested_p (const class loop *, const class loop *);
+extern bool flow_bb_inside_loop_p (const class loop *, const_basic_block);
+extern class loop * find_common_loop (class loop *, class loop *);
+class loop *superloop_at_depth (class loop *, unsigned);
struct eni_weights;
-extern int num_loop_insns (const struct loop *);
-extern int average_num_loop_insns (const struct loop *);
-extern unsigned get_loop_level (const struct loop *);
-extern bool loop_exit_edge_p (const struct loop *, const_edge);
-extern bool loop_exits_to_bb_p (struct loop *, basic_block);
-extern bool loop_exits_from_bb_p (struct loop *, basic_block);
+extern int num_loop_insns (const class loop *);
+extern int average_num_loop_insns (const class loop *);
+extern unsigned get_loop_level (const class loop *);
+extern bool loop_exit_edge_p (const class loop *, const_edge);
+extern bool loop_exits_to_bb_p (class loop *, basic_block);
+extern bool loop_exits_from_bb_p (class loop *, basic_block);
extern void mark_loop_exit_edges (void);
-extern dump_user_location_t get_loop_location (struct loop *loop);
+extern dump_user_location_t get_loop_location (class loop *loop);
/* Loops & cfg manipulation. */
-extern basic_block *get_loop_body (const struct loop *);
-extern unsigned get_loop_body_with_size (const struct loop *, basic_block *,
+extern basic_block *get_loop_body (const class loop *);
+extern unsigned get_loop_body_with_size (const class loop *, basic_block *,
unsigned);
-extern basic_block *get_loop_body_in_dom_order (const struct loop *);
-extern basic_block *get_loop_body_in_bfs_order (const struct loop *);
-extern basic_block *get_loop_body_in_custom_order (const struct loop *,
+extern basic_block *get_loop_body_in_dom_order (const class loop *);
+extern basic_block *get_loop_body_in_bfs_order (const class loop *);
+extern basic_block *get_loop_body_in_custom_order (const class loop *,
int (*) (const void *, const void *));
-extern vec<edge> get_loop_exit_edges (const struct loop *);
-extern edge single_exit (const struct loop *);
-extern edge single_likely_exit (struct loop *loop);
-extern unsigned num_loop_branches (const struct loop *);
+extern vec<edge> get_loop_exit_edges (const class loop *);
+extern edge single_exit (const class loop *);
+extern edge single_likely_exit (class loop *loop);
+extern unsigned num_loop_branches (const class loop *);
-extern edge loop_preheader_edge (const struct loop *);
-extern edge loop_latch_edge (const struct loop *);
+extern edge loop_preheader_edge (const class loop *);
+extern edge loop_latch_edge (const class loop *);
-extern void add_bb_to_loop (basic_block, struct loop *);
+extern void add_bb_to_loop (basic_block, class loop *);
extern void remove_bb_from_loops (basic_block);
-extern void cancel_loop_tree (struct loop *);
-extern void delete_loop (struct loop *);
+extern void cancel_loop_tree (class loop *);
+extern void delete_loop (class loop *);
extern void verify_loop_structure (void);
/* Loop analysis. */
-extern bool just_once_each_iteration_p (const struct loop *, const_basic_block);
-gcov_type expected_loop_iterations_unbounded (const struct loop *,
+extern bool just_once_each_iteration_p (const class loop *, const_basic_block);
+gcov_type expected_loop_iterations_unbounded (const class loop *,
bool *read_profile_p = NULL, bool by_profile_only = false);
-extern unsigned expected_loop_iterations (struct loop *);
+extern unsigned expected_loop_iterations (class loop *);
extern rtx doloop_condition_get (rtx_insn *);
void mark_loop_for_removal (loop_p);
@@ -490,21 +490,21 @@ public:
rtx niter_expr;
};
-extern void iv_analysis_loop_init (struct loop *);
-extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, struct rtx_iv *);
-extern bool iv_analyze_result (rtx_insn *, rtx, struct rtx_iv *);
+extern void iv_analysis_loop_init (class loop *);
+extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
+extern bool iv_analyze_result (rtx_insn *, rtx, class rtx_iv *);
extern bool iv_analyze_expr (rtx_insn *, scalar_int_mode, rtx,
- struct rtx_iv *);
-extern rtx get_iv_value (struct rtx_iv *, rtx);
+ class rtx_iv *);
+extern rtx get_iv_value (class rtx_iv *, rtx);
extern bool biv_p (rtx_insn *, scalar_int_mode, rtx);
-extern void find_simple_exit (struct loop *, struct niter_desc *);
+extern void find_simple_exit (class loop *, class niter_desc *);
extern void iv_analysis_done (void);
-extern struct niter_desc *get_simple_loop_desc (struct loop *loop);
-extern void free_simple_loop_desc (struct loop *loop);
+extern class niter_desc *get_simple_loop_desc (class loop *loop);
+extern void free_simple_loop_desc (class loop *loop);
-static inline struct niter_desc *
-simple_loop_desc (struct loop *loop)
+static inline class niter_desc *
+simple_loop_desc (class loop *loop)
{
return loop->simple_loop_desc;
}
@@ -513,7 +513,7 @@ simple_loop_desc (struct loop *loop)
/* Returns the loop with index NUM from FNs loop tree. */
-static inline struct loop *
+static inline class loop *
get_loop (struct function *fn, unsigned num)
{
return (*loops_for_fn (fn)->larray)[num];
@@ -522,7 +522,7 @@ get_loop (struct function *fn, unsigned num)
/* Returns the number of superloops of LOOP. */
static inline unsigned
-loop_depth (const struct loop *loop)
+loop_depth (const class loop *loop)
{
return vec_safe_length (loop->superloops);
}
@@ -530,8 +530,8 @@ loop_depth (const struct loop *loop)
/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
loop. */
-static inline struct loop *
-loop_outer (const struct loop *loop)
+static inline class loop *
+loop_outer (const class loop *loop)
{
unsigned n = vec_safe_length (loop->superloops);
@@ -544,7 +544,7 @@ loop_outer (const struct loop *loop)
/* Returns true if LOOP has at least one exit edge. */
static inline bool
-loop_has_exit_edges (const struct loop *loop)
+loop_has_exit_edges (const class loop *loop)
{
return loop->exits->next->e != NULL;
}
@@ -692,7 +692,7 @@ loop_iterator::next ()
inline
loop_iterator::loop_iterator (function *fn, loop_p *loop, unsigned flags)
{
- struct loop *aloop;
+ class loop *aloop;
unsigned i;
int mn;
@@ -843,11 +843,11 @@ enum
extern void doloop_optimize_loops (void);
extern void move_loop_invariants (void);
-extern vec<basic_block> get_loop_hot_path (const struct loop *loop);
+extern vec<basic_block> get_loop_hot_path (const class loop *loop);
/* Returns the outermost loop of the loop nest that contains LOOP.*/
-static inline struct loop *
-loop_outermost (struct loop *loop)
+static inline class loop *
+loop_outermost (class loop *loop)
{
unsigned n = vec_safe_length (loop->superloops);
@@ -857,13 +857,13 @@ loop_outermost (struct loop *loop)
return (*loop->superloops)[1];
}
-extern void record_niter_bound (struct loop *, const widest_int &, bool, bool);
-extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *);
-extern HOST_WIDE_INT get_max_loop_iterations_int (const struct loop *);
-extern HOST_WIDE_INT get_likely_max_loop_iterations_int (struct loop *);
-extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit);
-extern bool get_max_loop_iterations (const struct loop *loop, widest_int *nit);
-extern bool get_likely_max_loop_iterations (struct loop *loop, widest_int *nit);
+extern void record_niter_bound (class loop *, const widest_int &, bool, bool);
+extern HOST_WIDE_INT get_estimated_loop_iterations_int (class loop *);
+extern HOST_WIDE_INT get_max_loop_iterations_int (const class loop *);
+extern HOST_WIDE_INT get_likely_max_loop_iterations_int (class loop *);
+extern bool get_estimated_loop_iterations (class loop *loop, widest_int *nit);
+extern bool get_max_loop_iterations (const class loop *loop, widest_int *nit);
+extern bool get_likely_max_loop_iterations (class loop *loop, widest_int *nit);
extern int bb_loop_depth (const_basic_block);
/* Converts VAL to widest_int. */
diff --git a/gcc/cfgloopanal.c b/gcc/cfgloopanal.c
index 6dbe96f9d3d..10037f0b1b0 100644
--- a/gcc/cfgloopanal.c
+++ b/gcc/cfgloopanal.c
@@ -41,7 +41,7 @@ struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;
/* Checks whether BB is executed exactly once in each LOOP iteration. */
bool
-just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
+just_once_each_iteration_p (const class loop *loop, const_basic_block bb)
{
/* It must be executed at least once each iteration. */
if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
@@ -81,7 +81,7 @@ mark_irreducible_loops (void)
unsigned depth;
struct graph *g;
int num = number_of_loops (cfun);
- struct loop *cloop;
+ class loop *cloop;
bool irred_loop_found = false;
int i;
@@ -173,7 +173,7 @@ mark_irreducible_loops (void)
/* Counts number of insns inside LOOP. */
int
-num_loop_insns (const struct loop *loop)
+num_loop_insns (const class loop *loop)
{
basic_block *bbs, bb;
unsigned i, ninsns = 0;
@@ -197,7 +197,7 @@ num_loop_insns (const struct loop *loop)
/* Counts number of insns executed on average per iteration LOOP. */
int
-average_num_loop_insns (const struct loop *loop)
+average_num_loop_insns (const class loop *loop)
{
basic_block *bbs, bb;
unsigned i, binsns;
@@ -238,7 +238,7 @@ average_num_loop_insns (const struct loop *loop)
return -1 in those scenarios. */
gcov_type
-expected_loop_iterations_unbounded (const struct loop *loop,
+expected_loop_iterations_unbounded (const class loop *loop,
bool *read_profile_p,
bool by_profile_only)
{
@@ -310,7 +310,7 @@ expected_loop_iterations_unbounded (const struct loop *loop,
by REG_BR_PROB_BASE. */
unsigned
-expected_loop_iterations (struct loop *loop)
+expected_loop_iterations (class loop *loop)
{
gcov_type expected = expected_loop_iterations_unbounded (loop);
return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
@@ -319,9 +319,9 @@ expected_loop_iterations (struct loop *loop)
/* Returns the maximum level of nesting of subloops of LOOP. */
unsigned
-get_loop_level (const struct loop *loop)
+get_loop_level (const class loop *loop)
{
- const struct loop *ploop;
+ const class loop *ploop;
unsigned mx = 0, l;
for (ploop = loop->inner; ploop; ploop = ploop->next)
@@ -463,7 +463,7 @@ mark_loop_exit_edges (void)
to noreturn call. */
edge
-single_likely_exit (struct loop *loop)
+single_likely_exit (class loop *loop)
{
edge found = single_exit (loop);
vec<edge> exits;
@@ -500,7 +500,7 @@ single_likely_exit (struct loop *loop)
header != latch, latch is the 1-st block. */
vec<basic_block>
-get_loop_hot_path (const struct loop *loop)
+get_loop_hot_path (const class loop *loop)
{
basic_block bb = loop->header;
vec<basic_block> path = vNULL;
diff --git a/gcc/cfgloopmanip.c b/gcc/cfgloopmanip.c
index b5f6a47fb80..727e951edea 100644
--- a/gcc/cfgloopmanip.c
+++ b/gcc/cfgloopmanip.c
@@ -32,13 +32,13 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-loop-manip.h"
#include "dumpfile.h"
-static void copy_loops_to (struct loop **, int,
- struct loop *);
+static void copy_loops_to (class loop **, int,
+ class loop *);
static void loop_redirect_edge (edge, basic_block);
static void remove_bbs (basic_block *, int);
static bool rpe_enum_p (const_basic_block, const void *);
static int find_path (edge, basic_block **);
-static void fix_loop_placements (struct loop *, bool *);
+static void fix_loop_placements (class loop *, bool *);
static bool fix_bb_placement (basic_block);
static void fix_bb_placements (basic_block, bool *, bitmap);
@@ -89,7 +89,7 @@ fix_bb_placement (basic_block bb)
{
edge e;
edge_iterator ei;
- struct loop *loop = current_loops->tree_root, *act;
+ class loop *loop = current_loops->tree_root, *act;
FOR_EACH_EDGE (e, ei, bb->succs)
{
@@ -122,12 +122,12 @@ fix_bb_placement (basic_block bb)
invalidate the information about irreducible regions. */
static bool
-fix_loop_placement (struct loop *loop, bool *irred_invalidated)
+fix_loop_placement (class loop *loop, bool *irred_invalidated)
{
unsigned i;
edge e;
vec<edge> exits = get_loop_exit_edges (loop);
- struct loop *father = current_loops->tree_root, *act;
+ class loop *father = current_loops->tree_root, *act;
bool ret = false;
FOR_EACH_VEC_ELT (exits, i, e)
@@ -182,7 +182,7 @@ fix_bb_placements (basic_block from,
bitmap loop_closed_ssa_invalidated)
{
basic_block *queue, *qtop, *qbeg, *qend;
- struct loop *base_loop, *target_loop;
+ class loop *base_loop, *target_loop;
edge e;
/* We pass through blocks back-reachable from FROM, testing whether some
@@ -255,7 +255,7 @@ fix_bb_placements (basic_block from,
FOR_EACH_EDGE (e, ei, from->preds)
{
basic_block pred = e->src;
- struct loop *nca;
+ class loop *nca;
if (e->flags & EDGE_IRREDUCIBLE_LOOP)
*irred_invalidated = true;
@@ -307,7 +307,7 @@ remove_path (edge e, bool *irred_invalidated,
int i, nrem, n_bord_bbs;
bool local_irred_invalidated = false;
edge_iterator ei;
- struct loop *l, *f;
+ class loop *l, *f;
if (! irred_invalidated)
irred_invalidated = &local_irred_invalidated;
@@ -427,7 +427,7 @@ remove_path (edge e, bool *irred_invalidated,
/* Creates place for a new LOOP in loops structure of FN. */
void
-place_new_loop (struct function *fn, struct loop *loop)
+place_new_loop (struct function *fn, class loop *loop)
{
loop->num = number_of_loops (fn);
vec_safe_push (loops_for_fn (fn)->larray, loop);
@@ -438,11 +438,11 @@ place_new_loop (struct function *fn, struct loop *loop)
outer. */
void
-add_loop (struct loop *loop, struct loop *outer)
+add_loop (class loop *loop, class loop *outer)
{
basic_block *bbs;
int i, n;
- struct loop *subloop;
+ class loop *subloop;
edge e;
edge_iterator ei;
@@ -490,7 +490,7 @@ add_loop (struct loop *loop, struct loop *outer)
/* Scale profile of loop by P. */
void
-scale_loop_frequencies (struct loop *loop, profile_probability p)
+scale_loop_frequencies (class loop *loop, profile_probability p)
{
basic_block *bbs;
@@ -508,7 +508,7 @@ scale_loop_frequencies (struct loop *loop, profile_probability p)
they need to be scaled synchronously. */
void
-scale_loop_profile (struct loop *loop, profile_probability p,
+scale_loop_profile (class loop *loop, profile_probability p,
gcov_type iteration_bound)
{
edge e, preheader_e;
@@ -618,7 +618,7 @@ scale_loop_profile (struct loop *loop, profile_probability p,
/* Recompute dominance information for basic blocks outside LOOP. */
static void
-update_dominators_in_loop (struct loop *loop)
+update_dominators_in_loop (class loop *loop)
{
vec<basic_block> dom_bbs = vNULL;
basic_block *body;
@@ -763,17 +763,17 @@ create_empty_if_region_on_edge (edge entry_edge, tree condition)
should be used only when the UPPER_BOUND expression is a loop
invariant. */
-struct loop *
+class loop *
create_empty_loop_on_edge (edge entry_edge,
tree initial_value,
tree stride, tree upper_bound,
tree iv,
tree *iv_before,
tree *iv_after,
- struct loop *outer)
+ class loop *outer)
{
basic_block loop_header, loop_latch, succ_bb, pred_bb;
- struct loop *loop;
+ class loop *loop;
gimple_stmt_iterator gsi;
gimple_seq stmts;
gcond *cond_expr;
@@ -857,7 +857,7 @@ create_empty_loop_on_edge (edge entry_edge,
Returns the newly created loop. Frequencies and counts in the new loop
are scaled by FALSE_SCALE and in the old one by TRUE_SCALE. */
-struct loop *
+class loop *
loopify (edge latch_edge, edge header_edge,
basic_block switch_bb, edge true_edge, edge false_edge,
bool redirect_all_edges, profile_probability true_scale,
@@ -865,8 +865,8 @@ loopify (edge latch_edge, edge header_edge,
{
basic_block succ_bb = latch_edge->dest;
basic_block pred_bb = header_edge->src;
- struct loop *loop = alloc_loop ();
- struct loop *outer = loop_outer (succ_bb->loop_father);
+ class loop *loop = alloc_loop ();
+ class loop *outer = loop_outer (succ_bb->loop_father);
profile_count cnt;
loop->header = header_edge->dest;
@@ -923,11 +923,11 @@ loopify (edge latch_edge, edge header_edge,
basic blocks that had non-trivial update on their loop_father.*/
void
-unloop (struct loop *loop, bool *irred_invalidated,
+unloop (class loop *loop, bool *irred_invalidated,
bitmap loop_closed_ssa_invalidated)
{
basic_block *body;
- struct loop *ploop;
+ class loop *ploop;
unsigned i, n;
basic_block latch = loop->latch;
bool dummy = false;
@@ -978,9 +978,9 @@ unloop (struct loop *loop, bool *irred_invalidated,
invalidate the information about irreducible regions. */
static void
-fix_loop_placements (struct loop *loop, bool *irred_invalidated)
+fix_loop_placements (class loop *loop, bool *irred_invalidated)
{
- struct loop *outer;
+ class loop *outer;
while (loop_outer (loop))
{
@@ -1003,7 +1003,7 @@ fix_loop_placements (struct loop *loop, bool *irred_invalidated)
the loop into its duplicate. */
void
-copy_loop_info (struct loop *loop, struct loop *target)
+copy_loop_info (class loop *loop, class loop *target)
{
gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
target->any_upper_bound = loop->any_upper_bound;
@@ -1031,10 +1031,10 @@ copy_loop_info (struct loop *loop, struct loop *target)
created loop into loops structure. If AFTER is non-null
the new loop is added at AFTER->next, otherwise in front of TARGETs
sibling list. */
-struct loop *
-duplicate_loop (struct loop *loop, struct loop *target, struct loop *after)
+class loop *
+duplicate_loop (class loop *loop, class loop *target, class loop *after)
{
- struct loop *cloop;
+ class loop *cloop;
cloop = alloc_loop ();
place_new_loop (cfun, cloop);
@@ -1053,9 +1053,9 @@ duplicate_loop (struct loop *loop, struct loop *target, struct loop *after)
newly created loops into loop tree at the end of TARGETs sibling
list in the original order. */
void
-duplicate_subloops (struct loop *loop, struct loop *target)
+duplicate_subloops (class loop *loop, class loop *target)
{
- struct loop *aloop, *cloop, *tail;
+ class loop *aloop, *cloop, *tail;
for (tail = target->inner; tail && tail->next; tail = tail->next)
;
@@ -1072,9 +1072,9 @@ duplicate_subloops (struct loop *loop, struct loop *target)
into TARGET loop, placing newly created loops into loop tree adding
them to TARGETs sibling list at the end in order. */
static void
-copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
+copy_loops_to (class loop **copied_loops, int n, class loop *target)
{
- struct loop *aloop, *tail;
+ class loop *aloop, *tail;
int i;
for (tail = target->inner; tail && tail->next; tail = tail->next)
@@ -1100,7 +1100,7 @@ loop_redirect_edge (edge e, basic_block dest)
/* Check whether LOOP's body can be duplicated. */
bool
-can_duplicate_loop_p (const struct loop *loop)
+can_duplicate_loop_p (const class loop *loop)
{
int ret;
basic_block *bbs = get_loop_body (loop);
@@ -1124,13 +1124,13 @@ can_duplicate_loop_p (const struct loop *loop)
impossible. */
bool
-duplicate_loop_to_header_edge (struct loop *loop, edge e,
+duplicate_loop_to_header_edge (class loop *loop, edge e,
unsigned int ndupl, sbitmap wont_exit,
edge orig, vec<edge> *to_remove,
int flags)
{
- struct loop *target, *aloop;
- struct loop **orig_loops;
+ class loop *target, *aloop;
+ class loop **orig_loops;
unsigned n_orig_loops;
basic_block header = loop->header, latch = loop->latch;
basic_block *new_bbs, *bbs, *first_active;
@@ -1276,7 +1276,7 @@ duplicate_loop_to_header_edge (struct loop *loop, edge e,
n_orig_loops = 0;
for (aloop = loop->inner; aloop; aloop = aloop->next)
n_orig_loops++;
- orig_loops = XNEWVEC (struct loop *, n_orig_loops);
+ orig_loops = XNEWVEC (class loop *, n_orig_loops);
for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
orig_loops[i] = aloop;
@@ -1453,7 +1453,7 @@ mfb_keep_just (edge e)
/* True when a candidate preheader BLOCK has predecessors from LOOP. */
static bool
-has_preds_from_loop (basic_block block, struct loop *loop)
+has_preds_from_loop (basic_block block, class loop *loop)
{
edge e;
edge_iterator ei;
@@ -1473,7 +1473,7 @@ has_preds_from_loop (basic_block block, struct loop *loop)
The function also updates dominators. */
basic_block
-create_preheader (struct loop *loop, int flags)
+create_preheader (class loop *loop, int flags)
{
edge e;
basic_block dummy;
@@ -1573,7 +1573,7 @@ create_preheader (struct loop *loop, int flags)
void
create_preheaders (int flags)
{
- struct loop *loop;
+ class loop *loop;
if (!current_loops)
return;
@@ -1588,7 +1588,7 @@ create_preheaders (int flags)
void
force_single_succ_latches (void)
{
- struct loop *loop;
+ class loop *loop;
edge e;
FOR_EACH_LOOP (loop, 0)
@@ -1677,8 +1677,8 @@ lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
If PLACE_AFTER is true, we place the new loop after LOOP in the
instruction stream, otherwise it is placed before LOOP. */
-struct loop *
-loop_version (struct loop *loop,
+class loop *
+loop_version (class loop *loop,
void *cond_expr, basic_block *condition_bb,
profile_probability then_prob, profile_probability else_prob,
profile_probability then_scale, profile_probability else_scale,
@@ -1687,7 +1687,7 @@ loop_version (struct loop *loop,
basic_block first_head, second_head;
edge entry, latch_edge, true_edge, false_edge;
int irred_flag;
- struct loop *nloop;
+ class loop *nloop;
basic_block cond_bb;
/* Record entry and latch edges for the loop */
diff --git a/gcc/cfgloopmanip.h b/gcc/cfgloopmanip.h
index d57aeadf035..d14f49078c0 100644
--- a/gcc/cfgloopmanip.h
+++ b/gcc/cfgloopmanip.h
@@ -35,30 +35,30 @@ enum
extern edge mfb_kj_edge;
extern bool remove_path (edge, bool * = NULL, bitmap = NULL);
-extern void place_new_loop (struct function *, struct loop *);
-extern void add_loop (struct loop *, struct loop *);
-extern void scale_loop_frequencies (struct loop *, profile_probability);
-extern void scale_loop_profile (struct loop *, profile_probability, gcov_type);
+extern void place_new_loop (struct function *, class loop *);
+extern void add_loop (class loop *, class loop *);
+extern void scale_loop_frequencies (class loop *, profile_probability);
+extern void scale_loop_profile (class loop *, profile_probability, gcov_type);
extern edge create_empty_if_region_on_edge (edge, tree);
-extern struct loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
- tree *, tree *, struct loop *);
-extern struct loop *loopify (edge, edge,
+extern class loop *create_empty_loop_on_edge (edge, tree, tree, tree, tree,
+ tree *, tree *, class loop *);
+extern class loop *loopify (edge, edge,
basic_block, edge, edge, bool,
profile_probability, profile_probability);
-extern void unloop (struct loop *, bool *, bitmap);
-extern void copy_loop_info (struct loop *loop, struct loop *target);
-extern struct loop * duplicate_loop (struct loop *, struct loop *,
- struct loop * = NULL);
-extern void duplicate_subloops (struct loop *, struct loop *);
-extern bool can_duplicate_loop_p (const struct loop *loop);
-extern bool duplicate_loop_to_header_edge (struct loop *, edge,
+extern void unloop (class loop *, bool *, bitmap);
+extern void copy_loop_info (class loop *loop, class loop *target);
+extern class loop * duplicate_loop (class loop *, class loop *,
+ class loop * = NULL);
+extern void duplicate_subloops (class loop *, class loop *);
+extern bool can_duplicate_loop_p (const class loop *loop);
+extern bool duplicate_loop_to_header_edge (class loop *, edge,
unsigned, sbitmap, edge,
vec<edge> *, int);
extern bool mfb_keep_just (edge);
-basic_block create_preheader (struct loop *, int);
+basic_block create_preheader (class loop *, int);
extern void create_preheaders (int);
extern void force_single_succ_latches (void);
-struct loop * loop_version (struct loop *, void *,
+class loop * loop_version (class loop *, void *,
basic_block *,
profile_probability, profile_probability,
profile_probability, profile_probability, bool);
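
 /* A hypothetical usage sketch (not part of the patch) of the loop_version
    interface declared just above, written against the new "class loop"
    spelling: version one loop under condition COND, place the copy before
    the original, and split the branch probability evenly.  The wrapper
    function and COND are invented for illustration; loop_version,
    profile_probability::even () and basic_block are existing GCC entities.  */

 static class loop *
 example_version_loop (class loop *loop, tree cond)
 {
   basic_block cond_bb;
   return loop_version (loop, cond, &cond_bb,
			profile_probability::even (), /* then_prob */
			profile_probability::even (), /* else_prob */
			profile_probability::even (), /* then_scale */
			profile_probability::even (), /* else_scale */
			false);			      /* place copy before LOOP */
 }
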
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index e5fd6dd133c..fa5224fb3a5 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -912,9 +912,8 @@ struct cgraph_edge_hasher : ggc_ptr_hash<cgraph_edge>
/* The cgraph data structure.
Each function decl has assigned cgraph_node listing callees and callers. */
-class GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
+struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
{
-public:
friend class symbol_table;
/* Remove the node from cgraph and all inline clones inlined into it.
@@ -1506,7 +1505,7 @@ struct cgraph_node_set_def
typedef cgraph_node_set_def *cgraph_node_set;
typedef struct varpool_node_set_def *varpool_node_set;
-class varpool_node;
+struct varpool_node;
/* A varpool node set is a collection of varpool nodes. A varpool node
can appear in multiple sets. */
@@ -1620,7 +1619,7 @@ public:
/* LTO streaming. */
void stream_out (struct output_block *) const;
- void stream_in (struct lto_input_block *, struct data_in *data_in);
+ void stream_in (class lto_input_block *, class data_in *data_in);
private:
bool combine_speculation_with (tree, HOST_WIDE_INT, bool, tree);
@@ -1679,7 +1678,7 @@ class GTY((chain_next ("%h.next_caller"), chain_prev ("%h.prev_caller"),
for_user)) cgraph_edge
{
public:
- friend class cgraph_node;
+ friend struct cgraph_node;
friend class symbol_table;
/* Remove the edge in the cgraph. */
@@ -2078,9 +2077,9 @@ struct asmname_hasher : ggc_ptr_hash <symtab_node>
class GTY((tag ("SYMTAB"))) symbol_table
{
public:
- friend class symtab_node;
- friend class cgraph_node;
- friend class cgraph_edge;
+ friend struct symtab_node;
+ friend struct cgraph_node;
+ friend struct cgraph_edge;
symbol_table (): cgraph_max_uid (1), cgraph_max_summary_id (0),
edges_max_uid (1), edges_max_summary_id (0)
diff --git a/gcc/cgraphbuild.c b/gcc/cgraphbuild.c
index 97c010531bb..2e7d0b5fe95 100644
--- a/gcc/cgraphbuild.c
+++ b/gcc/cgraphbuild.c
@@ -36,7 +36,7 @@ along with GCC; see the file COPYING3. If not see
struct record_reference_ctx
{
bool only_vars;
- class varpool_node *varpool_node;
+ struct varpool_node *varpool_node;
};
/* Walk tree and record all calls and references to functions/variables.
diff --git a/gcc/combine.c b/gcc/combine.c
index 1be922df614..f7b1ebc8cc0 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -7829,7 +7829,7 @@ make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
For memory, assume that the desired extraction_mode and pos_mode
are the same as for a register operation, since at present we don't
have named patterns for aligned memory structures. */
- struct extraction_insn insn;
+ class extraction_insn insn;
unsigned int inner_size;
if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
&& get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
diff --git a/gcc/config/i386/i386-features.c b/gcc/config/i386/i386-features.c
index 2eac8f715bb..6ccd42a1874 100644
--- a/gcc/config/i386/i386-features.c
+++ b/gcc/config/i386/i386-features.c
@@ -152,7 +152,7 @@ const xlogue_layout xlogue_layout::s_instances[XLOGUE_SET_COUNT] = {
/* Return an appropriate const instance of xlogue_layout based upon values
in cfun->machine and crtl. */
-const struct xlogue_layout &
+const class xlogue_layout &
xlogue_layout::get_instance ()
{
enum xlogue_stub_sets stub_set;
diff --git a/gcc/config/i386/i386-features.h b/gcc/config/i386/i386-features.h
index 35812224997..f2c742fc0f7 100644
--- a/gcc/config/i386/i386-features.h
+++ b/gcc/config/i386/i386-features.h
@@ -84,7 +84,7 @@ public:
return STUB_INDEX_OFFSET + m_stack_align_off_in;
}
- static const struct xlogue_layout &get_instance ();
+ static const class xlogue_layout &get_instance ();
static unsigned count_stub_managed_regs ();
static bool is_stub_managed_reg (unsigned regno, unsigned count);
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 49f49c5f8d0..e278d9c76df 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -7689,7 +7689,7 @@ ix86_emit_outlined_ms2sysv_save (const struct ix86_frame &frame)
rtx_insn *insn;
rtx sym, addr;
rtx rax = gen_rtx_REG (word_mode, AX_REG);
- const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+ const class xlogue_layout &xlogue = xlogue_layout::get_instance ();
/* AL should only be live with sysv_abi. */
gcc_assert (!ix86_eax_live_at_start_p ());
@@ -8492,7 +8492,7 @@ ix86_emit_outlined_ms2sysv_restore (const struct ix86_frame &frame,
rtx sym, tmp;
rtx rsi = gen_rtx_REG (word_mode, SI_REG);
rtx r10 = NULL_RTX;
- const struct xlogue_layout &xlogue = xlogue_layout::get_instance ();
+ const class xlogue_layout &xlogue = xlogue_layout::get_instance ();
HOST_WIDE_INT stub_ptr_offset = xlogue.get_stub_ptr_offset ();
HOST_WIDE_INT rsi_offset = frame.stack_realign_offset + stub_ptr_offset;
rtx rsi_frame_load = NULL_RTX;
@@ -21503,7 +21503,7 @@ ix86_noce_conversion_profitable_p (rtx_insn *seq, struct noce_if_info *if_info)
/* Implement targetm.vectorize.init_cost. */
static void *
-ix86_init_cost (struct loop *)
+ix86_init_cost (class loop *)
{
unsigned *cost = XNEWVEC (unsigned, 3);
cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
@@ -21514,7 +21514,7 @@ ix86_init_cost (struct loop *)
static unsigned
ix86_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+ class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
unsigned *cost = (unsigned *) data;
@@ -21942,7 +21942,7 @@ ix86_simd_clone_usable (struct cgraph_node *node)
(value 32 is used) as a heuristic. */
static unsigned
-ix86_loop_unroll_adjust (unsigned nunroll, struct loop *loop)
+ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop)
{
basic_block *bbs;
rtx_insn *insn;
diff --git a/gcc/configure.ac b/gcc/configure.ac
index 137d5b469c3..c620dd2f447 100644
--- a/gcc/configure.ac
+++ b/gcc/configure.ac
@@ -482,7 +482,8 @@ AC_ARG_ENABLE(build-format-warnings,
AS_IF([test $enable_build_format_warnings = no],
[wf_opt=-Wno-format],[wf_opt=])
ACX_PROG_CXX_WARNING_OPTS(
- m4_quote(m4_do([-W -Wall -Wno-narrowing -Wwrite-strings ],
+ m4_quote(m4_do([-W -Wall -Wclass-is-pod -Wmismatched-tags ],
+ [-Wno-narrowing -Wstruct-not-pod -Wwrite-strings ],
[-Wcast-qual -Wno-error=format-diag $wf_opt])),
[loose_warn])
ACX_PROG_CC_WARNING_OPTS(
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index 2f6b8599d7c..eac2f3931aa 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -47,9 +47,9 @@ typedef int64_t gcov_type;
typedef uint64_t gcov_type_unsigned;
struct bitmap_obstack;
-struct bitmap_head;
-typedef struct bitmap_head *bitmap;
-typedef const struct bitmap_head *const_bitmap;
+class bitmap_head;
+typedef class bitmap_head *bitmap;
+typedef const class bitmap_head *const_bitmap;
struct simple_bitmap_def;
typedef struct simple_bitmap_def *sbitmap;
typedef const struct simple_bitmap_def *const_sbitmap;
@@ -65,7 +65,7 @@ template<typename> class opt_mode;
typedef opt_mode<scalar_mode> opt_scalar_mode;
typedef opt_mode<scalar_int_mode> opt_scalar_int_mode;
typedef opt_mode<scalar_float_mode> opt_scalar_float_mode;
-template<typename> class pod_mode;
+template<typename> struct pod_mode;
typedef pod_mode<scalar_mode> scalar_mode_pod;
typedef pod_mode<scalar_int_mode> scalar_int_mode_pod;
typedef pod_mode<fixed_size_mode> fixed_size_mode_pod;
@@ -73,19 +73,19 @@ typedef pod_mode<fixed_size_mode> fixed_size_mode_pod;
/* Subclasses of rtx_def, using indentation to show the class
hierarchy, along with the relevant invariant.
Where possible, keep this list in the same order as in rtl.def. */
-class rtx_def;
- class rtx_expr_list; /* GET_CODE (X) == EXPR_LIST */
- class rtx_insn_list; /* GET_CODE (X) == INSN_LIST */
- class rtx_sequence; /* GET_CODE (X) == SEQUENCE */
- class rtx_insn;
- class rtx_debug_insn; /* DEBUG_INSN_P (X) */
- class rtx_nonjump_insn; /* NONJUMP_INSN_P (X) */
- class rtx_jump_insn; /* JUMP_P (X) */
- class rtx_call_insn; /* CALL_P (X) */
- class rtx_jump_table_data; /* JUMP_TABLE_DATA_P (X) */
- class rtx_barrier; /* BARRIER_P (X) */
- class rtx_code_label; /* LABEL_P (X) */
- class rtx_note; /* NOTE_P (X) */
+struct rtx_def;
+ struct rtx_expr_list; /* GET_CODE (X) == EXPR_LIST */
+ struct rtx_insn_list; /* GET_CODE (X) == INSN_LIST */
+ struct rtx_sequence; /* GET_CODE (X) == SEQUENCE */
+ struct rtx_insn;
+ struct rtx_debug_insn; /* DEBUG_INSN_P (X) */
+ struct rtx_nonjump_insn; /* NONJUMP_INSN_P (X) */
+ struct rtx_jump_insn; /* JUMP_P (X) */
+ struct rtx_call_insn; /* CALL_P (X) */
+ struct rtx_jump_table_data; /* JUMP_TABLE_DATA_P (X) */
+ struct rtx_barrier; /* BARRIER_P (X) */
+ struct rtx_code_label; /* LABEL_P (X) */
+ struct rtx_note; /* NOTE_P (X) */
struct rtvec_def;
typedef struct rtvec_def *rtvec;
@@ -138,9 +138,9 @@ struct gomp_teams;
/* Subclasses of symtab_node, using indentation to show the class
hierarchy. */
-class symtab_node;
+struct symtab_node;
struct cgraph_node;
- class varpool_node;
+ struct varpool_node;
union section;
typedef union section section;
@@ -151,7 +151,7 @@ struct cl_option;
struct cl_decoded_option;
struct cl_option_handlers;
struct diagnostic_context;
-struct pretty_printer;
+class pretty_printer;
/* Address space number for named address space support. */
typedef unsigned char addr_space_t;
@@ -298,9 +298,9 @@ enum warn_strict_overflow_code
set yet). */
typedef int alias_set_type;
-struct edge_def;
-typedef struct edge_def *edge;
-typedef const struct edge_def *const_edge;
+class edge_def;
+typedef class edge_def *edge;
+typedef const class edge_def *const_edge;
struct basic_block_def;
typedef struct basic_block_def *basic_block;
typedef const struct basic_block_def *const_basic_block;
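
 /* A hypothetical illustration (not part of the patch) of the convention the
    coretypes.h hunks above apply: declare PODs with "struct" and everything
    else with "class", and keep every redeclaration's class-key consistent
    with the definition so that -Wmismatched-tags (and, per the ChangeLog,
    -Wclass-is-pod / -Wstruct-not-pod) stays quiet.  The type names below are
    invented for the example.  */

 struct pod_point { int x, y; };	/* POD: uses "struct" everywhere */
 struct pod_point;			/* consistent redeclaration */

 class managed_buffer			/* non-POD (has a constructor): "class" */
 {
 public:
   managed_buffer ();
 };
 class managed_buffer *make_buffer ();	/* redeclarations repeat "class" */

 /* Under the semantics implied by the warning names, these spellings would
    be flagged:
      class pod_point;		-- -Wclass-is-pod, -Wmismatched-tags
      struct managed_buffer;	-- -Wstruct-not-pod, -Wmismatched-tags  */
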
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 2dd3893f8ac..7609b206fbf 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,6 +1,13 @@
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
+ * cp-tree.h: Change class-key of PODs to struct and others to class.
+ * search.c: Same.
+ * semantics.c (finalize_nrv_r): Same.
+
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
* constexpr.c (cxx_eval_call_expression): Change class-key from class
to struct and vice versa to match convention and avoid -Wclass-is-pod
and -Wstruct-not-pod.

diff --git a/gcc/cp/search.c b/gcc/cp/search.c
index a737e242b1c..b441af8ff09 100644
--- a/gcc/cp/search.c
+++ b/gcc/cp/search.c
@@ -1275,7 +1275,7 @@ tree
lookup_member_fuzzy (tree xbasetype, tree name, bool want_type_p)
{
tree type = NULL_TREE, basetype_path = NULL_TREE;
- struct lookup_field_fuzzy_info lffi (want_type_p);
+ class lookup_field_fuzzy_info lffi (want_type_p);
/* rval_binfo is the binfo associated with the found member, note,
this can be set with useful information, even when rval is not
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index ceb6c641750..aadfaffca2a 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -4395,7 +4395,7 @@ public:
static tree
finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
{
- struct nrv_data *dp = (struct nrv_data *)data;
+ class nrv_data *dp = (class nrv_data *)data;
tree_node **slot;
/* No need to walk into types. There wouldn't be any need to walk into
@@ -4453,7 +4453,7 @@ finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
void
finalize_nrv (tree *tp, tree var, tree result)
{
- struct nrv_data data;
+ class nrv_data data;
/* Copy name from VAR to RESULT. */
DECL_NAME (result) = DECL_NAME (var);
diff --git a/gcc/data-streamer-in.c b/gcc/data-streamer-in.c
index 9c79e1d332d..11ad084780b 100644
--- a/gcc/data-streamer-in.c
+++ b/gcc/data-streamer-in.c
@@ -33,7 +33,7 @@ along with GCC; see the file COPYING3. If not see
IB. Write the length to RLEN. */
static const char *
-string_for_index (struct data_in *data_in, unsigned int loc, unsigned int *rlen)
+string_for_index (class data_in *data_in, unsigned int loc, unsigned int *rlen)
{
unsigned int len;
const char *result;
@@ -62,8 +62,8 @@ string_for_index (struct data_in *data_in, unsigned int loc, unsigned int *rlen)
IB. Write the length to RLEN. */
const char *
-streamer_read_indexed_string (struct data_in *data_in,
- struct lto_input_block *ib, unsigned int *rlen)
+streamer_read_indexed_string (class data_in *data_in,
+ class lto_input_block *ib, unsigned int *rlen)
{
return string_for_index (data_in, streamer_read_uhwi (ib), rlen);
}
@@ -72,7 +72,7 @@ streamer_read_indexed_string (struct data_in *data_in,
/* Read a NULL terminated string from the string table in DATA_IN. */
const char *
-streamer_read_string (struct data_in *data_in, struct lto_input_block *ib)
+streamer_read_string (class data_in *data_in, class lto_input_block *ib)
{
unsigned int len;
const char *ptr;
@@ -91,7 +91,7 @@ streamer_read_string (struct data_in *data_in, struct lto_input_block *ib)
Write the length to RLEN. */
const char *
-bp_unpack_indexed_string (struct data_in *data_in,
+bp_unpack_indexed_string (class data_in *data_in,
struct bitpack_d *bp, unsigned int *rlen)
{
return string_for_index (data_in, bp_unpack_var_len_unsigned (bp), rlen);
@@ -101,7 +101,7 @@ bp_unpack_indexed_string (struct data_in *data_in,
/* Read a NULL terminated string from the string table in DATA_IN. */
const char *
-bp_unpack_string (struct data_in *data_in, struct bitpack_d *bp)
+bp_unpack_string (class data_in *data_in, struct bitpack_d *bp)
{
unsigned int len;
const char *ptr;
@@ -119,7 +119,7 @@ bp_unpack_string (struct data_in *data_in, struct bitpack_d *bp)
/* Read an unsigned HOST_WIDE_INT number from IB. */
unsigned HOST_WIDE_INT
-streamer_read_uhwi (struct lto_input_block *ib)
+streamer_read_uhwi (class lto_input_block *ib)
{
unsigned HOST_WIDE_INT result;
int shift;
@@ -154,7 +154,7 @@ streamer_read_uhwi (struct lto_input_block *ib)
/* Read a HOST_WIDE_INT number from IB. */
HOST_WIDE_INT
-streamer_read_hwi (struct lto_input_block *ib)
+streamer_read_hwi (class lto_input_block *ib)
{
HOST_WIDE_INT result = 0;
int shift = 0;
@@ -178,7 +178,7 @@ streamer_read_hwi (struct lto_input_block *ib)
/* Read gcov_type value from IB. */
gcov_type
-streamer_read_gcov_count (struct lto_input_block *ib)
+streamer_read_gcov_count (class lto_input_block *ib)
{
gcov_type ret = streamer_read_hwi (ib);
return ret;
@@ -188,7 +188,7 @@ streamer_read_gcov_count (struct lto_input_block *ib)
input block IB. */
wide_int
-streamer_read_wide_int (struct lto_input_block *ib)
+streamer_read_wide_int (class lto_input_block *ib)
{
HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
int i;
@@ -203,7 +203,7 @@ streamer_read_wide_int (struct lto_input_block *ib)
input block IB. */
widest_int
-streamer_read_widest_int (struct lto_input_block *ib)
+streamer_read_widest_int (class lto_input_block *ib)
{
HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
int i;
diff --git a/gcc/data-streamer.h b/gcc/data-streamer.h
index cb3efc1afe6..c8bfd9a7030 100644
--- a/gcc/data-streamer.h
+++ b/gcc/data-streamer.h
@@ -73,18 +73,18 @@ void streamer_write_wide_int (struct output_block *, const wide_int &);
void streamer_write_widest_int (struct output_block *, const widest_int &);
/* In data-streamer-in.c */
-const char *streamer_read_string (struct data_in *, struct lto_input_block *);
-const char *streamer_read_indexed_string (struct data_in *,
- struct lto_input_block *,
+const char *streamer_read_string (class data_in *, class lto_input_block *);
+const char *streamer_read_indexed_string (class data_in *,
+ class lto_input_block *,
unsigned int *);
-const char *bp_unpack_indexed_string (struct data_in *, struct bitpack_d *,
+const char *bp_unpack_indexed_string (class data_in *, struct bitpack_d *,
unsigned int *);
-const char *bp_unpack_string (struct data_in *, struct bitpack_d *);
-unsigned HOST_WIDE_INT streamer_read_uhwi (struct lto_input_block *);
-HOST_WIDE_INT streamer_read_hwi (struct lto_input_block *);
-gcov_type streamer_read_gcov_count (struct lto_input_block *);
-wide_int streamer_read_wide_int (struct lto_input_block *);
-widest_int streamer_read_widest_int (struct lto_input_block *);
+const char *bp_unpack_string (class data_in *, struct bitpack_d *);
+unsigned HOST_WIDE_INT streamer_read_uhwi (class lto_input_block *);
+HOST_WIDE_INT streamer_read_hwi (class lto_input_block *);
+gcov_type streamer_read_gcov_count (class lto_input_block *);
+wide_int streamer_read_wide_int (class lto_input_block *);
+widest_int streamer_read_widest_int (class lto_input_block *);
/* Returns a new bit-packing context for bit-packing into S. */
static inline struct bitpack_d
@@ -149,7 +149,7 @@ streamer_write_bitpack (struct bitpack_d *bp)
/* Returns a new bit-packing context for bit-unpacking from IB. */
static inline struct bitpack_d
-streamer_read_bitpack (struct lto_input_block *ib)
+streamer_read_bitpack (class lto_input_block *ib)
{
struct bitpack_d bp;
bp.word = streamer_read_uhwi (ib);
@@ -174,7 +174,7 @@ bp_unpack_value (struct bitpack_d *bp, unsigned nbits)
if (pos + nbits > BITS_PER_BITPACK_WORD)
{
bp->word = val
- = streamer_read_uhwi ((struct lto_input_block *)bp->stream);
+ = streamer_read_uhwi ((class lto_input_block *)bp->stream);
bp->pos = nbits;
return val & mask;
}
@@ -218,7 +218,7 @@ streamer_write_char_stream (struct lto_output_stream *obs, char c)
/* Read byte from the input block. */
static inline unsigned char
-streamer_read_uchar (struct lto_input_block *ib)
+streamer_read_uchar (class lto_input_block *ib)
{
if (ib->p >= ib->len)
lto_section_overrun (ib);
@@ -248,7 +248,7 @@ streamer_write_hwi_in_range (struct lto_output_stream *obs,
to be compile time constant. PURPOSE is used for error reporting. */
static inline HOST_WIDE_INT
-streamer_read_hwi_in_range (struct lto_input_block *ib,
+streamer_read_hwi_in_range (class lto_input_block *ib,
const char *purpose,
HOST_WIDE_INT min,
HOST_WIDE_INT max)
@@ -337,7 +337,7 @@ streamer_write_record_start (struct output_block *ob, enum LTO_tags tag)
/* Return the next tag in the input block IB. */
static inline enum LTO_tags
-streamer_read_record_start (struct lto_input_block *ib)
+streamer_read_record_start (class lto_input_block *ib)
{
return streamer_read_enum (ib, LTO_tags, LTO_NUM_TAGS);
}
diff --git a/gcc/ddg.c b/gcc/ddg.c
index 82554ed96cf..28b2be90f59 100644
--- a/gcc/ddg.c
+++ b/gcc/ddg.c
@@ -215,7 +215,7 @@ create_ddg_dep_from_intra_loop_link (ddg_ptr g, ddg_node_ptr src_node,
{
int regno = REGNO (SET_DEST (set));
df_ref first_def;
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
first_def = df_bb_regno_first_def_find (g->bb, regno);
gcc_assert (first_def);
@@ -288,7 +288,7 @@ add_cross_iteration_register_deps (ddg_ptr g, df_ref last_def)
if (flag_checking && DF_REF_ID (last_def) != DF_REF_ID (first_def))
{
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (g->bb);
gcc_assert (!bitmap_bit_p (&bb_info->gen, DF_REF_ID (first_def)));
}
@@ -369,7 +369,7 @@ static void
build_inter_loop_deps (ddg_ptr g)
{
unsigned rd_num;
- struct df_rd_bb_info *rd_bb_info;
+ class df_rd_bb_info *rd_bb_info;
bitmap_iterator bi;
rd_bb_info = DF_RD_BB_INFO (g->bb);
@@ -475,7 +475,7 @@ build_intra_loop_deps (ddg_ptr g)
{
int i;
/* Hold the dependency analysis state during dependency calculations. */
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
rtx_insn *head, *tail;
/* Build the dependence information, using the sched_analyze function. */
diff --git a/gcc/df-core.c b/gcc/df-core.c
index 7299bfd20de..44848aa1768 100644
--- a/gcc/df-core.c
+++ b/gcc/df-core.c
@@ -407,7 +407,7 @@ bitmap_obstack df_bitmap_obstack;
Functions to create, destroy and manipulate an instance of df.
----------------------------------------------------------------------------*/
-struct df_d *df;
+class df_d *df;
/* Add PROBLEM (and any dependent problems) to the DF instance. */
@@ -684,7 +684,7 @@ static unsigned int
rest_of_handle_df_initialize (void)
{
gcc_assert (!df);
- df = XCNEW (struct df_d);
+ df = XCNEW (class df_d);
df->changeable_flags = 0;
bitmap_obstack_initialize (&df_bitmap_obstack);
@@ -1293,7 +1293,7 @@ df_analyze (void)
Returns the number of blocks which is always loop->num_nodes. */
static int
-loop_post_order_compute (int *post_order, struct loop *loop)
+loop_post_order_compute (int *post_order, class loop *loop)
{
edge_iterator *stack;
int sp;
@@ -1354,7 +1354,7 @@ loop_post_order_compute (int *post_order, struct loop *loop)
by LOOP. Returns the number of blocks which is always loop->num_nodes. */
static void
-loop_inverted_post_order_compute (vec<int> *post_order, struct loop *loop)
+loop_inverted_post_order_compute (vec<int> *post_order, class loop *loop)
{
basic_block bb;
edge_iterator *stack;
@@ -1419,7 +1419,7 @@ loop_inverted_post_order_compute (vec<int> *post_order, struct loop *loop)
/* Analyze dataflow info for the basic blocks contained in LOOP. */
void
-df_analyze_loop (struct loop *loop)
+df_analyze_loop (class loop *loop)
{
free (df->postorder);
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index 62b2cf6162f..d32c688510c 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -162,7 +162,7 @@ static void
df_rd_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_rd_bb_info *bb_info = (struct df_rd_bb_info *) vbb_info;
+ class df_rd_bb_info *bb_info = (class df_rd_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->kill);
@@ -182,17 +182,17 @@ df_rd_alloc (bitmap all_blocks)
{
unsigned int bb_index;
bitmap_iterator bi;
- struct df_rd_problem_data *problem_data;
+ class df_rd_problem_data *problem_data;
if (df_rd->problem_data)
{
- problem_data = (struct df_rd_problem_data *) df_rd->problem_data;
+ problem_data = (class df_rd_problem_data *) df_rd->problem_data;
bitmap_clear (&problem_data->sparse_invalidated_by_call);
bitmap_clear (&problem_data->dense_invalidated_by_call);
}
else
{
- problem_data = XNEW (struct df_rd_problem_data);
+ problem_data = XNEW (class df_rd_problem_data);
df_rd->problem_data = problem_data;
bitmap_obstack_initialize (&problem_data->rd_bitmaps);
@@ -209,7 +209,7 @@ df_rd_alloc (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->kill.obstack)
@@ -283,7 +283,7 @@ df_rd_simulate_one_insn (basic_block bb ATTRIBUTE_UNUSED, rtx_insn *insn,
of kill sets. */
static void
-df_rd_bb_local_compute_process_def (struct df_rd_bb_info *bb_info,
+df_rd_bb_local_compute_process_def (class df_rd_bb_info *bb_info,
df_ref def,
int top_flag)
{
@@ -340,7 +340,7 @@ static void
df_rd_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
rtx_insn *insn;
bitmap_clear (&seen_in_block);
@@ -390,8 +390,8 @@ df_rd_local_compute (bitmap all_blocks)
unsigned int bb_index;
bitmap_iterator bi;
unsigned int regno;
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
bitmap sparse_invalidated = &problem_data->sparse_invalidated_by_call;
bitmap dense_invalidated = &problem_data->dense_invalidated_by_call;
@@ -435,7 +435,7 @@ df_rd_init_solution (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
bitmap_copy (&bb_info->out, &bb_info->gen);
bitmap_clear (&bb_info->in);
@@ -456,8 +456,8 @@ df_rd_confluence_n (edge e)
if (e->flags & EDGE_EH)
{
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
bitmap sparse_invalidated = &problem_data->sparse_invalidated_by_call;
bitmap dense_invalidated = &problem_data->dense_invalidated_by_call;
bitmap_iterator bi;
@@ -485,7 +485,7 @@ df_rd_confluence_n (edge e)
static bool
df_rd_transfer_function (int bb_index)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
unsigned int regno;
bitmap_iterator bi;
bitmap in = &bb_info->in;
@@ -499,12 +499,12 @@ df_rd_transfer_function (int bb_index)
changed = bitmap_ior_and_compl (out, gen, in, kill);
else
{
- struct df_rd_problem_data *problem_data;
+ class df_rd_problem_data *problem_data;
bitmap_head tmp;
/* Note that TMP is _not_ a temporary bitmap if we end up replacing
OUT with TMP. Therefore, allocate TMP in the RD bitmaps obstack. */
- problem_data = (struct df_rd_problem_data *) df_rd->problem_data;
+ problem_data = (class df_rd_problem_data *) df_rd->problem_data;
bitmap_initialize (&tmp, &problem_data->rd_bitmaps);
bitmap_and_compl (&tmp, in, kill);
@@ -528,7 +528,7 @@ df_rd_transfer_function (int bb_index)
basic block, and mask out DEFs of registers that are not live.
Computing the mask looks costly, but the benefit of the pruning
outweighs the cost. */
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
bitmap regs_live_out = &df_lr_get_bb_info (bb_index)->out;
bitmap live_defs = BITMAP_ALLOC (&df_bitmap_obstack);
unsigned int regno;
@@ -550,8 +550,8 @@ df_rd_transfer_function (int bb_index)
static void
df_rd_free (void)
{
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
if (problem_data)
{
@@ -571,8 +571,8 @@ df_rd_free (void)
static void
df_rd_start_dump (FILE *file)
{
- struct df_rd_problem_data *problem_data
- = (struct df_rd_problem_data *) df_rd->problem_data;
+ class df_rd_problem_data *problem_data
+ = (class df_rd_problem_data *) df_rd->problem_data;
unsigned int m = DF_REG_SIZE (df);
unsigned int regno;
@@ -644,7 +644,7 @@ df_rd_dump_defs_set (bitmap defs_set, const char *prefix, FILE *file)
static void
df_rd_top_dump (basic_block bb, FILE *file)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -659,7 +659,7 @@ df_rd_top_dump (basic_block bb, FILE *file)
static void
df_rd_bottom_dump (basic_block bb, FILE *file)
{
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -692,7 +692,7 @@ static const struct df_problem problem_RD =
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_rd_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_rd_bb_info),/* Size of entry of block_info array. */
TV_DF_RD, /* Timing variable. */
true /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -734,7 +734,7 @@ static void
df_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_lr_bb_info *bb_info = (struct df_lr_bb_info *) vbb_info;
+ class df_lr_bb_info *bb_info = (class df_lr_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->use);
@@ -770,7 +770,7 @@ df_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
EXECUTE_IF_SET_IN_BITMAP (df_lr->out_of_date_transfer_functions, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->use.obstack)
@@ -801,7 +801,7 @@ df_lr_reset (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
@@ -815,7 +815,7 @@ static void
df_lr_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
rtx_insn *insn;
df_ref def, use;
@@ -930,7 +930,7 @@ df_lr_local_compute (bitmap all_blocks ATTRIBUTE_UNUSED)
{
/* The exit block is special for this problem and its bits are
computed from thin air. */
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (EXIT_BLOCK);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (EXIT_BLOCK);
bitmap_copy (&bb_info->use, df->exit_block_uses);
}
else
@@ -951,7 +951,7 @@ df_lr_init (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
bitmap_copy (&bb_info->in, &bb_info->use);
bitmap_clear (&bb_info->out);
}
@@ -997,7 +997,7 @@ df_lr_confluence_n (edge e)
static bool
df_lr_transfer_function (int bb_index)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap use = &bb_info->use;
@@ -1069,7 +1069,7 @@ df_lr_free (void)
static void
df_lr_top_dump (basic_block bb, FILE *file)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
struct df_lr_problem_data *problem_data;
if (!bb_info)
return;
@@ -1097,7 +1097,7 @@ df_lr_top_dump (basic_block bb, FILE *file)
static void
df_lr_bottom_dump (basic_block bb, FILE *file)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
struct df_lr_problem_data *problem_data;
if (!bb_info)
return;
@@ -1214,7 +1214,7 @@ static const struct df_problem problem_LR =
df_lr_verify_solution_start,/* Incremental solution verify start. */
df_lr_verify_solution_end, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_lr_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_lr_bb_info),/* Size of entry of block_info array. */
TV_DF_LR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -1254,7 +1254,7 @@ df_lr_verify_transfer_functions (void)
FOR_ALL_BB_FN (bb, cfun)
{
- struct df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
+ class df_lr_bb_info *bb_info = df_lr_get_bb_info (bb->index);
bitmap_set_bit (&all_blocks, bb->index);
if (bb_info)
@@ -1340,7 +1340,7 @@ static void
df_live_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_live_bb_info *bb_info = (struct df_live_bb_info *) vbb_info;
+ class df_live_bb_info *bb_info = (class df_live_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->gen);
@@ -1378,7 +1378,7 @@ df_live_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
EXECUTE_IF_SET_IN_BITMAP (df_live->out_of_date_transfer_functions, 0, bb_index, bi)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->kill.obstack)
@@ -1408,7 +1408,7 @@ df_live_reset (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
@@ -1422,7 +1422,7 @@ static void
df_live_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
rtx_insn *insn;
df_ref def;
int luid = 0;
@@ -1498,8 +1498,8 @@ df_live_init (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
- struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
/* No register may reach a location where it is not used. Thus
we trim the rr result to the places where it is used. */
@@ -1528,8 +1528,8 @@ df_live_confluence_n (edge e)
static bool
df_live_transfer_function (int bb_index)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
- struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap gen = &bb_info->gen;
@@ -1560,8 +1560,8 @@ df_live_finalize (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
- struct df_live_bb_info *bb_live_info = df_live_get_bb_info (bb_index);
+ class df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (bb_index);
+ class df_live_bb_info *bb_live_info = df_live_get_bb_info (bb_index);
/* No register may reach a location where it is not used. Thus
we trim the rr result to the places where it is used. */
@@ -1601,7 +1601,7 @@ df_live_free (void)
static void
df_live_top_dump (basic_block bb, FILE *file)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
struct df_live_problem_data *problem_data;
if (!bb_info)
@@ -1630,7 +1630,7 @@ df_live_top_dump (basic_block bb, FILE *file)
static void
df_live_bottom_dump (basic_block bb, FILE *file)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
struct df_live_problem_data *problem_data;
if (!bb_info)
@@ -1742,7 +1742,7 @@ static const struct df_problem problem_LIVE =
df_live_verify_solution_start,/* Incremental solution verify start. */
df_live_verify_solution_end, /* Incremental solution verify end. */
&problem_LR, /* Dependent problem. */
- sizeof (struct df_live_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_live_bb_info),/* Size of entry of block_info array. */
TV_DF_LIVE, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -1797,7 +1797,7 @@ df_live_verify_transfer_functions (void)
FOR_ALL_BB_FN (bb, cfun)
{
- struct df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
+ class df_live_bb_info *bb_info = df_live_get_bb_info (bb->index);
bitmap_set_bit (&all_blocks, bb->index);
if (bb_info)
@@ -1859,7 +1859,7 @@ static void
df_mir_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_mir_bb_info *bb_info = (struct df_mir_bb_info *) vbb_info;
+ class df_mir_bb_info *bb_info = (class df_mir_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->gen);
@@ -1896,7 +1896,7 @@ df_mir_alloc (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->kill.obstack)
@@ -1929,7 +1929,7 @@ df_mir_reset (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
gcc_assert (bb_info);
@@ -1947,7 +1947,7 @@ static void
df_mir_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
rtx_insn *insn;
int luid = 0;
@@ -2011,7 +2011,7 @@ df_mir_init (bitmap all_blocks)
static void
df_mir_confluence_0 (basic_block bb)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
bitmap_clear (&bb_info->in);
}
@@ -2039,7 +2039,7 @@ df_mir_confluence_n (edge e)
static bool
df_mir_transfer_function (int bb_index)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap gen = &bb_info->gen;
@@ -2074,7 +2074,7 @@ df_mir_free (void)
static void
df_mir_top_dump (basic_block bb, FILE *file)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -2092,7 +2092,7 @@ df_mir_top_dump (basic_block bb, FILE *file)
static void
df_mir_bottom_dump (basic_block bb, FILE *file)
{
- struct df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
+ class df_mir_bb_info *bb_info = df_mir_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -2193,7 +2193,7 @@ static const struct df_problem problem_MIR =
df_mir_verify_solution_start, /* Incremental solution verify start. */
df_mir_verify_solution_end, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_mir_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_mir_bb_info),/* Size of entry of block_info array. */
TV_DF_MIR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -2456,7 +2456,7 @@ static void
df_chain_create_bb (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
+ class df_rd_bb_info *bb_info = df_rd_get_bb_info (bb_index);
rtx_insn *insn;
bitmap_head cpy;
@@ -2711,7 +2711,7 @@ static void
df_word_lr_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_word_lr_bb_info *bb_info = (struct df_word_lr_bb_info *) vbb_info;
+ class df_word_lr_bb_info *bb_info = (class df_word_lr_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->use);
@@ -2754,7 +2754,7 @@ df_word_lr_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
EXECUTE_IF_SET_IN_BITMAP (df_word_lr->out_of_date_transfer_functions, 0, bb_index, bi)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->use.obstack)
@@ -2785,7 +2785,7 @@ df_word_lr_reset (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
@@ -2851,7 +2851,7 @@ static void
df_word_lr_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
rtx_insn *insn;
df_ref def, use;
@@ -2918,7 +2918,7 @@ df_word_lr_init (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
bitmap_copy (&bb_info->in, &bb_info->use);
bitmap_clear (&bb_info->out);
}
@@ -2942,7 +2942,7 @@ df_word_lr_confluence_n (edge e)
static bool
df_word_lr_transfer_function (int bb_index)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap use = &bb_info->use;
@@ -2979,7 +2979,7 @@ df_word_lr_free (void)
static void
df_word_lr_top_dump (basic_block bb, FILE *file)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -2997,7 +2997,7 @@ df_word_lr_top_dump (basic_block bb, FILE *file)
static void
df_word_lr_bottom_dump (basic_block bb, FILE *file)
{
- struct df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
+ class df_word_lr_bb_info *bb_info = df_word_lr_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -3032,7 +3032,7 @@ static const struct df_problem problem_WORD_LR =
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_word_lr_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_word_lr_bb_info),/* Size of entry of block_info array. */
TV_DF_WORD_LR, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
@@ -4348,7 +4348,7 @@ static void
df_md_free_bb_info (basic_block bb ATTRIBUTE_UNUSED,
void *vbb_info)
{
- struct df_md_bb_info *bb_info = (struct df_md_bb_info *) vbb_info;
+ class df_md_bb_info *bb_info = (class df_md_bb_info *) vbb_info;
if (bb_info)
{
bitmap_clear (&bb_info->kill);
@@ -4383,7 +4383,7 @@ df_md_alloc (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
/* When bitmaps are already initialized, just clear them. */
if (bb_info->init.obstack)
{
@@ -4452,7 +4452,7 @@ df_md_simulate_one_insn (basic_block bb ATTRIBUTE_UNUSED, rtx_insn *insn,
}
static void
-df_md_bb_local_compute_process_def (struct df_md_bb_info *bb_info,
+df_md_bb_local_compute_process_def (class df_md_bb_info *bb_info,
df_ref def,
int top_flag)
{
@@ -4493,7 +4493,7 @@ static void
df_md_bb_local_compute (unsigned int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
rtx_insn *insn;
/* Artificials are only hard regs. */
@@ -4571,7 +4571,7 @@ df_md_reset (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
gcc_assert (bb_info);
bitmap_clear (&bb_info->in);
bitmap_clear (&bb_info->out);
@@ -4582,7 +4582,7 @@ static bool
df_md_transfer_function (int bb_index)
{
basic_block bb = BASIC_BLOCK_FOR_FN (cfun, bb_index);
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
bitmap in = &bb_info->in;
bitmap out = &bb_info->out;
bitmap gen = &bb_info->gen;
@@ -4610,7 +4610,7 @@ df_md_init (bitmap all_blocks)
EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb_index);
bitmap_copy (&bb_info->in, &bb_info->init);
df_md_transfer_function (bb_index);
@@ -4620,7 +4620,7 @@ df_md_init (bitmap all_blocks)
static void
df_md_confluence_0 (basic_block bb)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
bitmap_copy (&bb_info->in, &bb_info->init);
}
@@ -4667,7 +4667,7 @@ df_md_free (void)
static void
df_md_top_dump (basic_block bb, FILE *file)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -4686,7 +4686,7 @@ df_md_top_dump (basic_block bb, FILE *file)
static void
df_md_bottom_dump (basic_block bb, FILE *file)
{
- struct df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
+ class df_md_bb_info *bb_info = df_md_get_bb_info (bb->index);
if (!bb_info)
return;
@@ -4718,7 +4718,7 @@ static const struct df_problem problem_MD =
NULL, /* Incremental solution verify start. */
NULL, /* Incremental solution verify end. */
NULL, /* Dependent problem. */
- sizeof (struct df_md_bb_info),/* Size of entry of block_info array. */
+ sizeof (class df_md_bb_info),/* Size of entry of block_info array. */
TV_DF_MD, /* Timing variable. */
false /* Reset blocks on dropping out of blocks_to_analyze. */
};
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index ffaca0b66fd..03294a8a2c3 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -53,25 +53,25 @@ public:
auto_vec<df_mw_hardreg *, 32> mw_vec;
};
-static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
+static void df_ref_record (enum df_ref_class, class df_collection_rec *,
rtx, rtx *,
basic_block, struct df_insn_info *,
enum df_ref_type, int ref_flags);
-static void df_def_record_1 (struct df_collection_rec *, rtx *,
+static void df_def_record_1 (class df_collection_rec *, rtx *,
basic_block, struct df_insn_info *,
int ref_flags);
-static void df_defs_record (struct df_collection_rec *, rtx,
+static void df_defs_record (class df_collection_rec *, rtx,
basic_block, struct df_insn_info *,
int ref_flags);
-static void df_uses_record (struct df_collection_rec *,
+static void df_uses_record (class df_collection_rec *,
rtx *, enum df_ref_type,
basic_block, struct df_insn_info *,
int ref_flags);
static void df_install_ref_incremental (df_ref);
-static void df_insn_refs_collect (struct df_collection_rec*,
+static void df_insn_refs_collect (class df_collection_rec*,
basic_block, struct df_insn_info *);
-static void df_canonize_collection_rec (struct df_collection_rec *);
+static void df_canonize_collection_rec (class df_collection_rec *);
static void df_get_regular_block_artificial_uses (bitmap);
static void df_get_eh_block_artificial_uses (bitmap);
@@ -84,13 +84,13 @@ static void df_grow_ref_info (struct df_ref_info *, unsigned int);
static void df_ref_chain_delete_du_chain (df_ref);
static void df_ref_chain_delete (df_ref);
-static void df_refs_add_to_chains (struct df_collection_rec *,
+static void df_refs_add_to_chains (class df_collection_rec *,
basic_block, rtx_insn *, unsigned int);
-static bool df_insn_refs_verify (struct df_collection_rec *, basic_block,
+static bool df_insn_refs_verify (class df_collection_rec *, basic_block,
rtx_insn *, bool);
-static void df_entry_block_defs_collect (struct df_collection_rec *, bitmap);
-static void df_exit_block_uses_collect (struct df_collection_rec *, bitmap);
+static void df_entry_block_defs_collect (class df_collection_rec *, bitmap);
+static void df_exit_block_uses_collect (class df_collection_rec *, bitmap);
static void df_install_ref (df_ref, struct df_reg_info *,
struct df_ref_info *, bool);
@@ -983,7 +983,7 @@ df_insn_delete (rtx_insn *insn)
/* Free all of the refs and the mw_hardregs in COLLECTION_REC. */
static void
-df_free_collection_rec (struct df_collection_rec *collection_rec)
+df_free_collection_rec (class df_collection_rec *collection_rec)
{
unsigned int ix;
struct df_scan_problem_data *problem_data
@@ -1014,7 +1014,7 @@ df_insn_rescan (rtx_insn *insn)
unsigned int uid = INSN_UID (insn);
struct df_insn_info *insn_info = NULL;
basic_block bb = BLOCK_FOR_INSN (insn);
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
if ((!df) || (!INSN_P (insn)))
return false;
@@ -1976,7 +1976,7 @@ df_notes_rescan (rtx_insn *insn)
{
basic_block bb = BLOCK_FOR_INSN (insn);
rtx note;
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
unsigned int i;
df_mw_hardreg_chain_delete_eq_uses (insn_info);
@@ -2269,7 +2269,7 @@ df_sort_and_compress_mws (vec<df_mw_hardreg *, va_heap> *mw_vec)
/* Sort and remove duplicates from the COLLECTION_REC. */
static void
-df_canonize_collection_rec (struct df_collection_rec *collection_rec)
+df_canonize_collection_rec (class df_collection_rec *collection_rec)
{
df_sort_and_compress_refs (&collection_rec->def_vec);
df_sort_and_compress_refs (&collection_rec->use_vec);
@@ -2405,7 +2405,7 @@ df_install_mws (const vec<df_mw_hardreg *, va_heap> *old_vec)
chains and update other necessary information. */
static void
-df_refs_add_to_chains (struct df_collection_rec *collection_rec,
+df_refs_add_to_chains (class df_collection_rec *collection_rec,
basic_block bb, rtx_insn *insn, unsigned int flags)
{
if (insn)
@@ -2467,7 +2467,7 @@ df_refs_add_to_chains (struct df_collection_rec *collection_rec,
static df_ref
df_ref_create_structure (enum df_ref_class cl,
- struct df_collection_rec *collection_rec,
+ class df_collection_rec *collection_rec,
rtx reg, rtx *loc,
basic_block bb, struct df_insn_info *info,
enum df_ref_type ref_type,
@@ -2553,7 +2553,7 @@ df_ref_create_structure (enum df_ref_class cl,
static void
df_ref_record (enum df_ref_class cl,
- struct df_collection_rec *collection_rec,
+ class df_collection_rec *collection_rec,
rtx reg, rtx *loc,
basic_block bb, struct df_insn_info *insn_info,
enum df_ref_type ref_type,
@@ -2625,7 +2625,7 @@ df_ref_record (enum df_ref_class cl,
Any change here has to be matched in df_find_hard_reg_defs_1. */
static void
-df_def_record_1 (struct df_collection_rec *collection_rec,
+df_def_record_1 (class df_collection_rec *collection_rec,
rtx *loc, basic_block bb, struct df_insn_info *insn_info,
int flags)
{
@@ -2690,7 +2690,7 @@ df_def_record_1 (struct df_collection_rec *collection_rec,
here has to be matched in df_find_hard_reg_defs. */
static void
-df_defs_record (struct df_collection_rec *collection_rec,
+df_defs_record (class df_collection_rec *collection_rec,
rtx x, basic_block bb, struct df_insn_info *insn_info,
int flags)
{
@@ -2796,7 +2796,7 @@ df_find_hard_reg_defs (rtx x, HARD_REG_SET *defs)
/* Process all the registers used in the rtx at address LOC. */
static void
-df_uses_record (struct df_collection_rec *collection_rec,
+df_uses_record (class df_collection_rec *collection_rec,
rtx *loc, enum df_ref_type ref_type,
basic_block bb, struct df_insn_info *insn_info,
int flags)
@@ -3055,7 +3055,7 @@ df_uses_record (struct df_collection_rec *collection_rec,
/* For all DF_REF_CONDITIONAL defs, add a corresponding uses. */
static void
-df_get_conditional_uses (struct df_collection_rec *collection_rec)
+df_get_conditional_uses (class df_collection_rec *collection_rec)
{
unsigned int ix;
df_ref ref;
@@ -3079,7 +3079,7 @@ df_get_conditional_uses (struct df_collection_rec *collection_rec)
/* Get call's extra defs and uses (track caller-saved registers). */
static void
-df_get_call_refs (struct df_collection_rec *collection_rec,
+df_get_call_refs (class df_collection_rec *collection_rec,
basic_block bb,
struct df_insn_info *insn_info,
int flags)
@@ -3162,7 +3162,7 @@ df_get_call_refs (struct df_collection_rec *collection_rec,
and reg chains. */
static void
-df_insn_refs_collect (struct df_collection_rec *collection_rec,
+df_insn_refs_collect (class df_collection_rec *collection_rec,
basic_block bb, struct df_insn_info *insn_info)
{
rtx note;
@@ -3258,7 +3258,7 @@ df_recompute_luids (basic_block bb)
to COLLECTION_REC. */
static void
-df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb)
+df_bb_refs_collect (class df_collection_rec *collection_rec, basic_block bb)
{
collection_rec->def_vec.truncate (0);
collection_rec->use_vec.truncate (0);
@@ -3558,7 +3558,7 @@ df_get_entry_block_def_set (bitmap entry_block_defs)
reference to include. */
static void
-df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
+df_entry_block_defs_collect (class df_collection_rec *collection_rec,
bitmap entry_block_defs)
{
unsigned int i;
@@ -3580,7 +3580,7 @@ df_entry_block_defs_collect (struct df_collection_rec *collection_rec,
static void
df_record_entry_block_defs (bitmap entry_block_defs)
{
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
df_entry_block_defs_collect (&collection_rec, entry_block_defs);
/* Process bb_refs chain */
@@ -3715,7 +3715,7 @@ df_get_exit_block_use_set (bitmap exit_block_uses)
It uses df->exit_block_uses to determine register to include. */
static void
-df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exit_block_uses)
+df_exit_block_uses_collect (class df_collection_rec *collection_rec, bitmap exit_block_uses)
{
unsigned int i;
bitmap_iterator bi;
@@ -3744,7 +3744,7 @@ df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exit_block_uses)
static void
df_record_exit_block_uses (bitmap exit_block_uses)
{
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
df_exit_block_uses_collect (&collection_rec, exit_block_uses);
/* Process bb_refs chain */
@@ -4052,7 +4052,7 @@ df_mws_verify (const vec<df_mw_hardreg *, va_heap> *new_rec,
If ABORT_IF_FAIL is set, this function never returns false. */
static bool
-df_insn_refs_verify (struct df_collection_rec *collection_rec,
+df_insn_refs_verify (class df_collection_rec *collection_rec,
basic_block bb,
rtx_insn *insn,
bool abort_if_fail)
@@ -4093,7 +4093,7 @@ df_bb_verify (basic_block bb)
{
rtx_insn *insn;
struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);
- struct df_collection_rec collection_rec;
+ class df_collection_rec collection_rec;
gcc_assert (bb_info);
diff --git a/gcc/df.h b/gcc/df.h
index 13358f9ad3c..2e3b825065e 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -30,7 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "timevar.h"
struct dataflow;
-struct df_d;
+class df_d;
struct df_problem;
struct df_link;
struct df_insn_info;
@@ -935,7 +935,7 @@ public:
/* This is used for debugging and for the dumpers to find the latest
instance so that the df info can be added to the dumps. This
should not be used by regular code. */
-extern struct df_d *df;
+extern class df_d *df;
#define df_scan (df->problems_by_index[DF_SCAN])
#define df_rd (df->problems_by_index[DF_RD])
#define df_lr (df->problems_by_index[DF_LR])
@@ -968,7 +968,7 @@ extern void df_remove_problem (struct dataflow *);
extern void df_finish_pass (bool);
extern void df_analyze_problem (struct dataflow *, bitmap, int *, int);
extern void df_analyze ();
-extern void df_analyze_loop (struct loop *);
+extern void df_analyze_loop (class loop *);
extern int df_get_n_blocks (enum df_flow_dir);
extern int *df_get_postorder (enum df_flow_dir);
extern void df_simple_dataflow (enum df_flow_dir, df_init_function,
@@ -1103,56 +1103,56 @@ df_scan_get_bb_info (unsigned int index)
return NULL;
}
-static inline struct df_rd_bb_info *
+static inline class df_rd_bb_info *
df_rd_get_bb_info (unsigned int index)
{
if (index < df_rd->block_info_size)
- return &((struct df_rd_bb_info *) df_rd->block_info)[index];
+ return &((class df_rd_bb_info *) df_rd->block_info)[index];
else
return NULL;
}
-static inline struct df_lr_bb_info *
+static inline class df_lr_bb_info *
df_lr_get_bb_info (unsigned int index)
{
if (index < df_lr->block_info_size)
- return &((struct df_lr_bb_info *) df_lr->block_info)[index];
+ return &((class df_lr_bb_info *) df_lr->block_info)[index];
else
return NULL;
}
-static inline struct df_md_bb_info *
+static inline class df_md_bb_info *
df_md_get_bb_info (unsigned int index)
{
if (index < df_md->block_info_size)
- return &((struct df_md_bb_info *) df_md->block_info)[index];
+ return &((class df_md_bb_info *) df_md->block_info)[index];
else
return NULL;
}
-static inline struct df_live_bb_info *
+static inline class df_live_bb_info *
df_live_get_bb_info (unsigned int index)
{
if (index < df_live->block_info_size)
- return &((struct df_live_bb_info *) df_live->block_info)[index];
+ return &((class df_live_bb_info *) df_live->block_info)[index];
else
return NULL;
}
-static inline struct df_word_lr_bb_info *
+static inline class df_word_lr_bb_info *
df_word_lr_get_bb_info (unsigned int index)
{
if (index < df_word_lr->block_info_size)
- return &((struct df_word_lr_bb_info *) df_word_lr->block_info)[index];
+ return &((class df_word_lr_bb_info *) df_word_lr->block_info)[index];
else
return NULL;
}
-static inline struct df_mir_bb_info *
+static inline class df_mir_bb_info *
df_mir_get_bb_info (unsigned int index)
{
if (index < df_mir->block_info_size)
- return &((struct df_mir_bb_info *) df_mir->block_info)[index];
+ return &((class df_mir_bb_info *) df_mir->block_info)[index];
else
return NULL;
}
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 14c1ea6a323..8e5b01c9383 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -4301,7 +4301,7 @@ with machine mode @var{mode}. The default version of this
hook returns true for both @code{ptr_mode} and @code{Pmode}.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_REF_MAY_ALIAS_ERRNO (struct ao_ref *@var{ref})
+@deftypefn {Target Hook} bool TARGET_REF_MAY_ALIAS_ERRNO (ao_ref *@var{ref})
Define this to return nonzero if the memory reference @var{ref} may alias with the system C library errno location. The default version of this hook assumes the system C library errno location is either a declaration of type int or accessed by dereferencing a pointer to int.
@end deftypefn
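For context on the hook whose prototype changes just above: a minimal sketch of a target override, assuming a hypothetical port named "mytarget" on which generated code can never reach the C library's errno, might look like this (illustration only, not part of this patch):

/* Hypothetical target override: errno is never addressable from
   generated code on this port, so no memory reference can alias it.  */
static bool
mytarget_ref_may_alias_errno (ao_ref *)
{
  return false;
}

#undef TARGET_REF_MAY_ALIAS_ERRNO
#define TARGET_REF_MAY_ALIAS_ERRNO mytarget_ref_may_alias_errno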
@@ -6052,11 +6052,11 @@ type @code{internal_fn}) should be considered expensive when the mask is
all zeros. GCC can then try to branch around the instruction instead.
@end deftypefn
-@deftypefn {Target Hook} {void *} TARGET_VECTORIZE_INIT_COST (struct loop *@var{loop_info})
+@deftypefn {Target Hook} {void *} TARGET_VECTORIZE_INIT_COST (class loop *@var{loop_info})
This hook should initialize target-specific data structures in preparation for modeling the costs of vectorizing a loop or basic block. The default allocates three unsigned integers for accumulating costs for the prologue, body, and epilogue of the loop or basic block. If @var{loop_info} is non-NULL, it identifies the loop being vectorized; otherwise a single block is being vectorized.
@end deftypefn
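A hedged sketch of how a port might implement this hook, mirroring the default behaviour described above (hypothetical "mytarget_" names, not part of this patch):

/* Allocate the per-loop (or per-block) cost accumulators: one counter
   each for the prologue, body and epilogue, all starting at zero.  */
static void *
mytarget_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
{
  unsigned *costs = XNEWVEC (unsigned, 3);
  costs[vect_prologue] = costs[vect_body] = costs[vect_epilogue] = 0;
  return costs;
}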
-@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, struct _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where})
+@deftypefn {Target Hook} unsigned TARGET_VECTORIZE_ADD_STMT_COST (void *@var{data}, int @var{count}, enum vect_cost_for_stmt @var{kind}, class _stmt_vec_info *@var{stmt_info}, int @var{misalign}, enum vect_cost_model_location @var{where})
This hook should update the target-specific @var{data} in response to adding @var{count} copies of the given @var{kind} of statement to a loop or basic block. The default adds the builtin vectorizer cost for the copies of the statement to the accumulator specified by @var{where}, (the prologue, body, or epilogue) and returns the amount added. The return value should be viewed as a tentative cost that may later be revised.
@end deftypefn
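Continuing the same hypothetical sketch, the companion hook would accumulate into the buffer returned by the init hook above; a real port would weight the cost by the statement kind and misalignment rather than charging a flat 1 per copy:

/* Add COUNT copies of a statement to the accumulator selected by WHERE
   and report how much was added.  Flat cost of 1 per copy for brevity.  */
static unsigned
mytarget_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt,
                        class _stmt_vec_info *, int,
                        enum vect_cost_model_location where)
{
  unsigned *costs = (unsigned *) data;
  unsigned retval = (unsigned) count;
  costs[where] += retval;
  return retval;
}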
@@ -11610,7 +11610,7 @@ function version at run-time for a given set of function versions.
body must be generated.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_PREDICT_DOLOOP_P (struct loop *@var{loop})
+@deftypefn {Target Hook} bool TARGET_PREDICT_DOLOOP_P (class loop *@var{loop})
Return true if we can predict it is possible to use a low-overhead loop
for a particular loop. The parameter @var{loop} is a pointer to the loop.
This target hook is required only when the target supports low-overhead
@@ -11815,7 +11815,7 @@ This function prepares to emit a conditional comparison within a sequence
@var{bit_code} is @code{AND} or @code{IOR}, which is the op on the compares.
@end deftypefn
-@deftypefn {Target Hook} unsigned TARGET_LOOP_UNROLL_ADJUST (unsigned @var{nunroll}, struct loop *@var{loop})
+@deftypefn {Target Hook} unsigned TARGET_LOOP_UNROLL_ADJUST (unsigned @var{nunroll}, class loop *@var{loop})
This target hook returns a new value for the number of times @var{loop}
should be unrolled. The parameter @var{nunroll} is the number of times
the loop is to be unrolled. The parameter @var{loop} is a pointer to
diff --git a/gcc/dse.c b/gcc/dse.c
index a3cefbf527f..a1c7e3bc942 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -278,7 +278,7 @@ public:
} positions_needed;
/* The next store info for this insn. */
- struct store_info *next;
+ class store_info *next;
/* The right hand side of the store. This is used if there is a
subsequent reload of the mems address somewhere later in the
@@ -326,9 +326,9 @@ public:
rtx mem;
/* The next read_info for this insn. */
- struct read_info_type *next;
+ class read_info_type *next;
};
-typedef struct read_info_type *read_info_t;
+typedef class read_info_type *read_info_t;
static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
@@ -1509,7 +1509,7 @@ record_store (rtx body, bb_info_t bb_info)
while (ptr)
{
insn_info_t next = ptr->next_local_store;
- struct store_info *s_info = ptr->store_rec;
+ class store_info *s_info = ptr->store_rec;
bool del = true;
/* Skip the clobbers. We delete the active insn if this insn
diff --git a/gcc/dumpfile.h b/gcc/dumpfile.h
index 8614483f36d..ad8bcb2a05a 100644
--- a/gcc/dumpfile.h
+++ b/gcc/dumpfile.h
@@ -648,7 +648,7 @@ extern void dump_combine_total_stats (FILE *);
/* In cfghooks.c */
extern void dump_bb (FILE *, basic_block, int, dump_flags_t);
-struct opt_pass;
+class opt_pass;
namespace gcc {
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index f60f2e21837..a667cdab94e 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -351,7 +351,7 @@ const_fixed_hasher::equal (rtx x, rtx y)
/* Return true if the given memory attributes are equal. */
bool
-mem_attrs_eq_p (const struct mem_attrs *p, const struct mem_attrs *q)
+mem_attrs_eq_p (const class mem_attrs *p, const class mem_attrs *q)
{
if (p == q)
return true;
@@ -1924,7 +1924,7 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp,
{
poly_int64 apply_bitpos = 0;
tree type;
- struct mem_attrs attrs, *defattrs, *refattrs;
+ class mem_attrs attrs, *defattrs, *refattrs;
addr_space_t as;
/* It can happen that type_for_mode was given a mode for which there
@@ -2334,7 +2334,7 @@ change_address (rtx memref, machine_mode mode, rtx addr)
{
rtx new_rtx = change_address_1 (memref, mode, addr, 1, false);
machine_mode mmode = GET_MODE (new_rtx);
- struct mem_attrs *defattrs;
+ class mem_attrs *defattrs;
mem_attrs attrs (*get_mem_attrs (memref));
defattrs = mode_mem_attrs[(int) mmode];
@@ -2378,7 +2378,7 @@ adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset,
rtx addr = XEXP (memref, 0);
rtx new_rtx;
scalar_int_mode address_mode;
- struct mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
+ class mem_attrs attrs (*get_mem_attrs (memref)), *defattrs;
unsigned HOST_WIDE_INT max_align;
#ifdef POINTERS_EXTEND_UNSIGNED
scalar_int_mode pointer_mode
@@ -2524,7 +2524,7 @@ offset_address (rtx memref, rtx offset, unsigned HOST_WIDE_INT pow2)
{
rtx new_rtx, addr = XEXP (memref, 0);
machine_mode address_mode;
- struct mem_attrs *defattrs;
+ class mem_attrs *defattrs;
mem_attrs attrs (*get_mem_attrs (memref));
address_mode = get_address_mode (memref);
diff --git a/gcc/emit-rtl.h b/gcc/emit-rtl.h
index 7b1cecd3c44..7643bf9cefb 100644
--- a/gcc/emit-rtl.h
+++ b/gcc/emit-rtl.h
@@ -20,8 +20,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_EMIT_RTL_H
#define GCC_EMIT_RTL_H
-struct temp_slot;
-typedef struct temp_slot *temp_slot_p;
+class temp_slot;
+typedef class temp_slot *temp_slot_p;
/* Information maintained about RTL representation of incoming arguments. */
struct GTY(()) incoming_args {
@@ -110,7 +110,7 @@ struct GTY(()) rtl_data {
vec<rtx, va_gc> *x_stack_slot_list;
/* List of empty areas in the stack frame. */
- struct frame_space *frame_space_list;
+ class frame_space *frame_space_list;
/* Place after which to insert the tail_recursion_label if we need one. */
rtx_note *x_stack_check_probe_note;
@@ -136,7 +136,7 @@ struct GTY(()) rtl_data {
vec<temp_slot_p, va_gc> *x_used_temp_slots;
/* List of available temp slots. */
- struct temp_slot *x_avail_temp_slots;
+ class temp_slot *x_avail_temp_slots;
/* Current nesting level for temporaries. */
int x_temp_slot_level;
@@ -319,7 +319,7 @@ extern GTY(()) struct rtl_data x_rtl;
#define crtl (&x_rtl)
/* Return whether two MEM_ATTRs are equal. */
-bool mem_attrs_eq_p (const struct mem_attrs *, const struct mem_attrs *);
+bool mem_attrs_eq_p (const class mem_attrs *, const class mem_attrs *);
/* Set the alias set of MEM to SET. */
extern void set_mem_alias_set (rtx, alias_set_type);
diff --git a/gcc/except.c b/gcc/except.c
index b1f04ee6861..1e6f8af258b 100644
--- a/gcc/except.c
+++ b/gcc/except.c
@@ -1015,7 +1015,7 @@ dw2_build_landing_pads (void)
make_single_succ_edge (bb, bb->next_bb, e_flags);
if (current_loops)
{
- struct loop *loop = bb->next_bb->loop_father;
+ class loop *loop = bb->next_bb->loop_father;
/* If we created a pre-header block, add the new block to the
outer loop, otherwise to the loop itself. */
if (bb->next_bb == loop->header)
@@ -1389,7 +1389,7 @@ sjlj_emit_dispatch_table (rtx_code_label *dispatch_label, int num_dispatch)
make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
if (current_loops)
{
- struct loop *loop = bb->next_bb->loop_father;
+ class loop *loop = bb->next_bb->loop_father;
/* If we created a pre-header block, add the new block to the
outer loop, otherwise to the loop itself. */
if (bb->next_bb == loop->header)
@@ -1427,7 +1427,7 @@ sjlj_emit_dispatch_table (rtx_code_label *dispatch_label, int num_dispatch)
make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
if (current_loops)
{
- struct loop *loop = bb->next_bb->loop_father;
+ class loop *loop = bb->next_bb->loop_father;
/* If we created a pre-header block, add the new block to the
outer loop, otherwise to the loop itself. */
if (bb->next_bb == loop->header)
diff --git a/gcc/explow.c b/gcc/explow.c
index aea7118af1e..7eb854bca4a 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -1489,7 +1489,7 @@ allocate_dynamic_stack_space (rtx size, unsigned size_align,
stack pointer, such as acquiring the space by calling malloc(). */
if (targetm.have_allocate_stack ())
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
/* We don't have to check against the predicate for operand 0 since
TARGET is known to be a pseudo of the proper mode, which must
be valid for the operand. */
@@ -1620,7 +1620,7 @@ emit_stack_probe (rtx address)
{
if (targetm.have_probe_stack_address ())
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
insn_code icode = targetm.code_for_probe_stack_address;
create_address_operand (ops, address);
maybe_legitimize_operands (icode, 0, 1, ops);
@@ -1680,7 +1680,7 @@ probe_stack_range (HOST_WIDE_INT first, rtx size)
/* Next see if we have an insn to check the stack. */
else if (targetm.have_check_stack ())
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
rtx addr = memory_address (Pmode,
gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
stack_pointer_rtx,
diff --git a/gcc/expmed.c b/gcc/expmed.c
index d7f8e9a5d76..c582f3a1e62 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -599,7 +599,7 @@ store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
unsigned HOST_WIDE_INT bitnum,
rtx value, scalar_int_mode value_mode)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx value1;
rtx xop0 = op0;
rtx_insn *last = get_last_insn ();
@@ -759,7 +759,7 @@ store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
&& known_eq (bitsize, GET_MODE_BITSIZE (innermode))
&& multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
enum insn_code icode = optab_handler (vec_set_optab, outermode);
create_fixed_operand (&ops[0], op0);
@@ -870,7 +870,7 @@ store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
&& known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
&& optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
rtx arg0 = op0;
unsigned HOST_WIDE_INT subreg_off;
@@ -1499,7 +1499,7 @@ extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
int unsignedp, rtx target,
machine_mode mode, machine_mode tmode)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx spec_target = target;
rtx spec_target_subreg = 0;
scalar_int_mode ext_mode = extv->field_mode;
@@ -1655,7 +1655,7 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
!= CODE_FOR_nothing)
&& multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
machine_mode outermode = new_mode;
machine_mode innermode = tmode;
enum insn_code icode
@@ -1722,7 +1722,7 @@ extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
&& known_eq (bitsize, GET_MODE_BITSIZE (innermode))
&& multiple_p (bitnum, GET_MODE_BITSIZE (innermode), &pos))
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, innermode);
ops[0].target = 1;
@@ -5428,7 +5428,7 @@ emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
int unsignedp, rtx x, rtx y, int normalizep,
machine_mode target_mode)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx op0, comparison, subtarget;
rtx_insn *last;
scalar_int_mode result_mode = targetm.cstore_mode (icode);
diff --git a/gcc/expr.c b/gcc/expr.c
index c922aaa45b9..ff1f224d6ce 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -1769,7 +1769,7 @@ emit_block_move_via_cpymem (rtx x, rtx y, rtx size, unsigned int align,
|| max_size <= (GET_MODE_MASK (mode) >> 1)
|| GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
{
- struct expand_operand ops[9];
+ class expand_operand ops[9];
unsigned int nops;
/* ??? When called via emit_block_move_for_call, it'd be
@@ -1932,7 +1932,7 @@ expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
target = NULL_RTX;
- struct expand_operand ops[5];
+ class expand_operand ops[5];
create_output_operand (&ops[0], target, insn_mode);
create_fixed_operand (&ops[1], arg1_rtx);
create_fixed_operand (&ops[2], arg2_rtx);
@@ -3137,7 +3137,7 @@ set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
|| max_size <= (GET_MODE_MASK (mode) >> 1)
|| GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
{
- struct expand_operand ops[9];
+ class expand_operand ops[9];
unsigned int nops;
nops = insn_data[(int) code].n_generator_args;
@@ -4181,7 +4181,7 @@ emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
icode = optab_handler (push_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_input_operand (&ops[0], x, mode);
if (maybe_expand_insn (icode, 1, ops))
@@ -5027,7 +5027,7 @@ expand_assignment (tree to, tree from, bool nontemporal)
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_fixed_operand (&ops[0], mem);
create_input_operand (&ops[1], reg, mode);
@@ -5456,7 +5456,7 @@ expand_assignment (tree to, tree from, bool nontemporal)
bool
emit_storent_insn (rtx to, rtx from)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
machine_mode mode = GET_MODE (to);
enum insn_code code = optab_handler (storent_optab, mode);
@@ -6759,7 +6759,7 @@ store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
!= CODE_FOR_nothing)
&& (elt = uniform_vector_p (exp)))
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], expand_normal (elt), eltmode);
expand_insn (icode, 2, ops);
@@ -9554,7 +9554,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
&& mode == TYPE_MODE (TREE_TYPE (treeop0))
&& SCALAR_INT_MODE_P (mode))
{
- struct expand_operand eops[4];
+ class expand_operand eops[4];
machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
expand_operands (treeop0, treeop1,
subtarget, &op0, &op1, EXPAND_NORMAL);
@@ -10292,7 +10292,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
&& ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing))
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail,
@@ -10374,7 +10374,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
if ((icode = optab_handler (movmisalign_optab, mode))
!= CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
/* We've already validated the memory, and we're creating a
new pseudo destination. The predicates really can't fail,
@@ -12180,7 +12180,7 @@ try_casesi (tree index_type, tree index_expr, tree minval, tree range,
rtx table_label, rtx default_label, rtx fallback_label,
profile_probability default_probability)
{
- struct expand_operand ops[5];
+ class expand_operand ops[5];
scalar_int_mode index_mode = SImode;
rtx op1, op2, index;
diff --git a/gcc/flags.h b/gcc/flags.h
index 0cc7df2666c..0b9cd12e6cc 100644
--- a/gcc/flags.h
+++ b/gcc/flags.h
@@ -56,9 +56,9 @@ public:
enum excess_precision x_flag_excess_precision;
};
-extern struct target_flag_state default_target_flag_state;
+extern class target_flag_state default_target_flag_state;
#if SWITCHABLE_TARGET
-extern struct target_flag_state *this_target_flag_state;
+extern class target_flag_state *this_target_flag_state;
#else
#define this_target_flag_state (&default_target_flag_state)
#endif
diff --git a/gcc/function.c b/gcc/function.c
index b44c4d0ee33..373c1f2beb2 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -133,7 +133,7 @@ vec<tree, va_gc> *types_used_by_cur_var_decl;
/* Forward declarations. */
-static struct temp_slot *find_temp_slot_from_address (rtx);
+static class temp_slot *find_temp_slot_from_address (rtx);
static void pad_to_arg_alignment (struct args_size *, int, struct args_size *);
static void pad_below (struct args_size *, machine_mode, tree);
static void reorder_blocks_1 (rtx_insn *, tree, vec<tree> *);
@@ -345,7 +345,7 @@ try_fit_stack_local (poly_int64 start, poly_int64 length,
static void
add_frame_space (poly_int64 start, poly_int64 end)
{
- struct frame_space *space = ggc_alloc<frame_space> ();
+ class frame_space *space = ggc_alloc<frame_space> ();
space->next = crtl->frame_space_list;
crtl->frame_space_list = space;
space->start = start;
@@ -441,11 +441,11 @@ assign_stack_local_1 (machine_mode mode, poly_int64 size,
{
if (kind & ASLK_RECORD_PAD)
{
- struct frame_space **psp;
+ class frame_space **psp;
for (psp = &crtl->frame_space_list; *psp; psp = &(*psp)->next)
{
- struct frame_space *space = *psp;
+ class frame_space *space = *psp;
if (!try_fit_stack_local (space->start, space->length, size,
alignment, &slot_offset))
continue;
@@ -559,9 +559,9 @@ assign_stack_local (machine_mode mode, poly_int64 size, int align)
class GTY(()) temp_slot {
public:
/* Points to next temporary slot. */
- struct temp_slot *next;
+ class temp_slot *next;
/* Points to previous temporary slot. */
- struct temp_slot *prev;
+ class temp_slot *prev;
/* The rtx used to reference the slot. */
rtx slot;
/* The size, in units, of the slot. */
@@ -589,7 +589,7 @@ public:
struct GTY((for_user)) temp_slot_address_entry {
hashval_t hash;
rtx address;
- struct temp_slot *temp_slot;
+ class temp_slot *temp_slot;
};
struct temp_address_hasher : ggc_ptr_hash<temp_slot_address_entry>
@@ -606,7 +606,7 @@ static size_t n_temp_slots_in_use;
/* Removes temporary slot TEMP from LIST. */
static void
-cut_slot_from_list (struct temp_slot *temp, struct temp_slot **list)
+cut_slot_from_list (class temp_slot *temp, class temp_slot **list)
{
if (temp->next)
temp->next->prev = temp->prev;
@@ -621,7 +621,7 @@ cut_slot_from_list (struct temp_slot *temp, struct temp_slot **list)
/* Inserts temporary slot TEMP to LIST. */
static void
-insert_slot_to_list (struct temp_slot *temp, struct temp_slot **list)
+insert_slot_to_list (class temp_slot *temp, class temp_slot **list)
{
temp->next = *list;
if (*list)
@@ -632,7 +632,7 @@ insert_slot_to_list (struct temp_slot *temp, struct temp_slot **list)
/* Returns the list of used temp slots at LEVEL. */
-static struct temp_slot **
+static class temp_slot **
temp_slots_at_level (int level)
{
if (level >= (int) vec_safe_length (used_temp_slots))
@@ -655,7 +655,7 @@ max_slot_level (void)
/* Moves temporary slot TEMP to LEVEL. */
static void
-move_slot_to_level (struct temp_slot *temp, int level)
+move_slot_to_level (class temp_slot *temp, int level)
{
cut_slot_from_list (temp, temp_slots_at_level (temp->level));
insert_slot_to_list (temp, temp_slots_at_level (level));
@@ -665,7 +665,7 @@ move_slot_to_level (struct temp_slot *temp, int level)
/* Make temporary slot TEMP available. */
static void
-make_slot_available (struct temp_slot *temp)
+make_slot_available (class temp_slot *temp)
{
cut_slot_from_list (temp, temp_slots_at_level (temp->level));
insert_slot_to_list (temp, &avail_temp_slots);
@@ -701,7 +701,7 @@ temp_address_hasher::equal (temp_slot_address_entry *t1,
/* Add ADDRESS as an alias of TEMP_SLOT to the address -> temp slot mapping. */
static void
-insert_temp_slot_address (rtx address, struct temp_slot *temp_slot)
+insert_temp_slot_address (rtx address, class temp_slot *temp_slot)
{
struct temp_slot_address_entry *t = ggc_alloc<temp_slot_address_entry> ();
t->address = address;
@@ -735,10 +735,10 @@ remove_unused_temp_slot_addresses (void)
/* Find the temp slot corresponding to the object at address X. */
-static struct temp_slot *
+static class temp_slot *
find_temp_slot_from_address (rtx x)
{
- struct temp_slot *p;
+ class temp_slot *p;
struct temp_slot_address_entry tmp, *t;
/* First try the easy way:
@@ -787,7 +787,7 @@ rtx
assign_stack_temp_for_type (machine_mode mode, poly_int64 size, tree type)
{
unsigned int align;
- struct temp_slot *p, *best_p = 0, *selected = NULL, **pp;
+ class temp_slot *p, *best_p = 0, *selected = NULL, **pp;
rtx slot;
gcc_assert (known_size_p (size));
@@ -1031,7 +1031,7 @@ assign_temp (tree type_or_decl, int memory_required,
static void
combine_temp_slots (void)
{
- struct temp_slot *p, *q, *next, *next_q;
+ class temp_slot *p, *q, *next, *next_q;
int num_slots;
/* We can't combine slots, because the information about which slot
@@ -1095,7 +1095,7 @@ combine_temp_slots (void)
void
update_temp_slot_address (rtx old_rtx, rtx new_rtx)
{
- struct temp_slot *p;
+ class temp_slot *p;
if (rtx_equal_p (old_rtx, new_rtx))
return;
@@ -1149,7 +1149,7 @@ update_temp_slot_address (rtx old_rtx, rtx new_rtx)
void
preserve_temp_slots (rtx x)
{
- struct temp_slot *p = 0, *next;
+ class temp_slot *p = 0, *next;
if (x == 0)
return;
@@ -1189,7 +1189,7 @@ preserve_temp_slots (rtx x)
void
free_temp_slots (void)
{
- struct temp_slot *p, *next;
+ class temp_slot *p, *next;
bool some_available = false;
for (p = *temp_slots_at_level (temp_slot_level); p; p = next)
diff --git a/gcc/function.h b/gcc/function.h
index c2596bf48d5..43ac5dffd24 100644
--- a/gcc/function.h
+++ b/gcc/function.h
@@ -186,7 +186,7 @@ struct GTY(()) function_subsections {
class GTY(()) frame_space
{
public:
- struct frame_space *next;
+ class frame_space *next;
poly_int64 start;
poly_int64 length;
@@ -243,7 +243,7 @@ struct GTY(()) function {
char *pass_startwith;
/* The stack usage of this function. */
- struct stack_usage *su;
+ class stack_usage *su;
/* Value histograms attached to particular statements. */
htab_t GTY((skip)) value_histograms;
diff --git a/gcc/fwprop.c b/gcc/fwprop.c
index e6f37527192..137864cb61b 100644
--- a/gcc/fwprop.c
+++ b/gcc/fwprop.c
@@ -224,8 +224,8 @@ edge
single_def_use_dom_walker::before_dom_children (basic_block bb)
{
int bb_index = bb->index;
- struct df_md_bb_info *md_bb_info = df_md_get_bb_info (bb_index);
- struct df_lr_bb_info *lr_bb_info = df_lr_get_bb_info (bb_index);
+ class df_md_bb_info *md_bb_info = df_md_get_bb_info (bb_index);
+ class df_lr_bb_info *lr_bb_info = df_lr_get_bb_info (bb_index);
rtx_insn *insn;
bitmap_copy (local_md, &md_bb_info->in);
diff --git a/gcc/gcc-rich-location.h b/gcc/gcc-rich-location.h
index de92e3b2b13..3bee2e82ad5 100644
--- a/gcc/gcc-rich-location.h
+++ b/gcc/gcc-rich-location.h
@@ -181,7 +181,7 @@ class maybe_range_label_for_tree_type_mismatch : public range_label
tree m_other_expr;
};
-struct op_location_t;
+class op_location_t;
/* A subclass of rich_location for showing problems with binary operations.
diff --git a/gcc/gcov.c b/gcc/gcov.c
index 1d9d3b25b5d..c65b7153765 100644
--- a/gcc/gcov.c
+++ b/gcc/gcov.c
@@ -76,17 +76,17 @@ using namespace std;
/* This is the size of the buffer used to read in source file lines. */
-struct function_info;
-struct block_info;
-struct source_info;
+class function_info;
+class block_info;
+class source_info;
/* Describes an arc between two basic blocks. */
struct arc_info
{
/* source and destination blocks. */
- struct block_info *src;
- struct block_info *dst;
+ class block_info *src;
+ class block_info *dst;
/* transition counts. */
gcov_type count;
@@ -178,7 +178,7 @@ public:
/* Temporary chain for solving graph, and for chaining blocks on one
line. */
- struct block_info *chain;
+ class block_info *chain;
};
@@ -297,7 +297,7 @@ public:
vector<line_info> lines;
/* Next function. */
- struct function_info *next;
+ class function_info *next;
/* Get demangled name of a function. The demangled name
is converted when it is used for the first time. */
diff --git a/gcc/genattrtab.c b/gcc/genattrtab.c
index 604526d6808..cdf0b5c12dc 100644
--- a/gcc/genattrtab.c
+++ b/gcc/genattrtab.c
@@ -136,7 +136,7 @@ static struct obstack *temp_obstack = &obstack2;
class insn_def
{
public:
- struct insn_def *next; /* Next insn in chain. */
+ class insn_def *next; /* Next insn in chain. */
rtx def; /* The DEFINE_... */
int insn_code; /* Instruction number. */
int insn_index; /* Expression number in file, for errors. */
@@ -152,7 +152,7 @@ public:
struct insn_ent
{
struct insn_ent *next; /* Next in chain. */
- struct insn_def *def; /* Instruction definition. */
+ class insn_def *def; /* Instruction definition. */
};
/* Each value of an attribute (either constant or computed) is assigned a
@@ -175,7 +175,7 @@ class attr_desc
public:
char *name; /* Name of attribute. */
const char *enum_name; /* Enum name for DEFINE_ENUM_NAME. */
- struct attr_desc *next; /* Next attribute. */
+ class attr_desc *next; /* Next attribute. */
struct attr_value *first_value; /* First value of this attribute. */
struct attr_value *default_val; /* Default value for this attribute. */
file_location loc; /* Where in the .md files it occurs. */
@@ -190,7 +190,7 @@ class delay_desc
{
public:
rtx def; /* DEFINE_DELAY expression. */
- struct delay_desc *next; /* Next DEFINE_DELAY. */
+ class delay_desc *next; /* Next DEFINE_DELAY. */
file_location loc; /* Where in the .md files it occurs. */
int num; /* Number of DEFINE_DELAY, starting at 1. */
};
@@ -199,7 +199,7 @@ struct attr_value_list
{
struct attr_value *av;
struct insn_ent *ie;
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value_list *next;
};
@@ -207,9 +207,9 @@ struct attr_value_list
/* This one is indexed by the first character of the attribute name. */
#define MAX_ATTRS_INDEX 256
-static struct attr_desc *attrs[MAX_ATTRS_INDEX];
-static struct insn_def *defs;
-static struct delay_desc *delays;
+static class attr_desc *attrs[MAX_ATTRS_INDEX];
+static class insn_def *defs;
+static class delay_desc *delays;
struct attr_value_list **insn_code_values;
/* Other variables. */
@@ -260,7 +260,7 @@ static char *attr_string (const char *, int);
static char *attr_printf (unsigned int, const char *, ...)
ATTRIBUTE_PRINTF_2;
static rtx make_numeric_value (int);
-static struct attr_desc *find_attr (const char **, int);
+static class attr_desc *find_attr (const char **, int);
static rtx mk_attr_alt (alternative_mask);
static char *next_comma_elt (const char **);
static rtx insert_right_side (enum rtx_code, rtx, rtx, int, int);
@@ -278,15 +278,15 @@ static rtx copy_rtx_unchanging (rtx);
static bool attr_alt_subset_p (rtx, rtx);
static bool attr_alt_subset_of_compl_p (rtx, rtx);
static void clear_struct_flag (rtx);
-static void write_attr_valueq (FILE *, struct attr_desc *, const char *);
-static struct attr_value *find_most_used (struct attr_desc *);
-static void write_attr_set (FILE *, struct attr_desc *, int, rtx,
+static void write_attr_valueq (FILE *, class attr_desc *, const char *);
+static struct attr_value *find_most_used (class attr_desc *);
+static void write_attr_set (FILE *, class attr_desc *, int, rtx,
const char *, const char *, rtx,
int, int, unsigned int);
-static void write_attr_case (FILE *, struct attr_desc *,
+static void write_attr_case (FILE *, class attr_desc *,
struct attr_value *,
int, const char *, const char *, int, rtx);
-static void write_attr_value (FILE *, struct attr_desc *, rtx);
+static void write_attr_value (FILE *, class attr_desc *, rtx);
static void write_upcase (FILE *, const char *);
static void write_indent (FILE *, int);
static rtx identity_fn (rtx);
@@ -847,7 +847,7 @@ check_attr_test (file_location loc, rtx exp, attr_desc *attr)
Return a perhaps modified replacement expression for the value. */
static rtx
-check_attr_value (file_location loc, rtx exp, struct attr_desc *attr)
+check_attr_value (file_location loc, rtx exp, class attr_desc *attr)
{
struct attr_value *av;
const char *p;
@@ -957,7 +957,7 @@ check_attr_value (file_location loc, rtx exp, struct attr_desc *attr)
case ATTR:
{
- struct attr_desc *attr2 = find_attr (&XSTR (exp, 0), 0);
+ class attr_desc *attr2 = find_attr (&XSTR (exp, 0), 0);
if (attr2 == NULL)
error_at (loc, "unknown attribute `%s' in ATTR",
XSTR (exp, 0));
@@ -991,7 +991,7 @@ check_attr_value (file_location loc, rtx exp, struct attr_desc *attr)
It becomes a COND with each test being (eq_attr "alternative" "n") */
static rtx
-convert_set_attr_alternative (rtx exp, struct insn_def *id)
+convert_set_attr_alternative (rtx exp, class insn_def *id)
{
int num_alt = id->num_alternatives;
rtx condexp;
@@ -1027,7 +1027,7 @@ convert_set_attr_alternative (rtx exp, struct insn_def *id)
list of values is given, convert to SET_ATTR_ALTERNATIVE first. */
static rtx
-convert_set_attr (rtx exp, struct insn_def *id)
+convert_set_attr (rtx exp, class insn_def *id)
{
rtx newexp;
const char *name_ptr;
@@ -1061,8 +1061,8 @@ convert_set_attr (rtx exp, struct insn_def *id)
static void
check_defs (void)
{
- struct insn_def *id;
- struct attr_desc *attr;
+ class insn_def *id;
+ class attr_desc *attr;
int i;
rtx value;
@@ -1119,7 +1119,7 @@ check_defs (void)
value. LOC is the location to use for error reporting. */
static rtx
-make_canonical (file_location loc, struct attr_desc *attr, rtx exp)
+make_canonical (file_location loc, class attr_desc *attr, rtx exp)
{
int i;
rtx newexp;
@@ -1226,7 +1226,7 @@ copy_boolean (rtx exp)
alternatives. LOC is the location to use for error reporting. */
static struct attr_value *
-get_attr_value (file_location loc, rtx value, struct attr_desc *attr,
+get_attr_value (file_location loc, rtx value, class attr_desc *attr,
int insn_code)
{
struct attr_value *av;
@@ -1276,7 +1276,7 @@ get_attr_value (file_location loc, rtx value, struct attr_desc *attr,
static void
expand_delays (void)
{
- struct delay_desc *delay;
+ class delay_desc *delay;
rtx condexp;
rtx newexp;
int i;
@@ -1362,11 +1362,11 @@ expand_delays (void)
the attribute. */
static void
-fill_attr (struct attr_desc *attr)
+fill_attr (class attr_desc *attr)
{
struct attr_value *av;
struct insn_ent *ie;
- struct insn_def *id;
+ class insn_def *id;
int i;
rtx value;
@@ -1491,7 +1491,7 @@ make_length_attrs (void)
static rtx (*const address_fn[]) (rtx)
= {max_fn, min_fn, one_fn, identity_fn};
size_t i;
- struct attr_desc *length_attr, *new_attr;
+ class attr_desc *length_attr, *new_attr;
struct attr_value *av, *new_av;
struct insn_ent *ie, *new_ie;
@@ -1565,7 +1565,7 @@ min_fn (rtx exp)
static void
write_length_unit_log (FILE *outf)
{
- struct attr_desc *length_attr = find_attr (&length_str, 0);
+ class attr_desc *length_attr = find_attr (&length_str, 0);
struct attr_value *av;
struct insn_ent *ie;
unsigned int length_unit_log, length_or;
@@ -1924,7 +1924,7 @@ make_alternative_compare (alternative_mask mask)
corresponding to INSN_CODE and INSN_INDEX. */
static rtx
-evaluate_eq_attr (rtx exp, struct attr_desc *attr, rtx value,
+evaluate_eq_attr (rtx exp, class attr_desc *attr, rtx value,
int insn_code, int insn_index)
{
rtx orexp, andexp;
@@ -2417,7 +2417,7 @@ static rtx
simplify_test_exp (rtx exp, int insn_code, int insn_index)
{
rtx left, right;
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value *av;
struct insn_ent *ie;
struct attr_value_list *iv;
@@ -2758,7 +2758,7 @@ simplify_test_exp (rtx exp, int insn_code, int insn_index)
otherwise return 0. */
static int
-tests_attr_p (rtx p, struct attr_desc *attr)
+tests_attr_p (rtx p, class attr_desc *attr)
{
const char *fmt;
int i, ie, j, je;
@@ -2799,18 +2799,18 @@ tests_attr_p (rtx p, struct attr_desc *attr)
attr_desc pointers), and return the size of that array. */
static int
-get_attr_order (struct attr_desc ***ret)
+get_attr_order (class attr_desc ***ret)
{
int i, j;
int num = 0;
- struct attr_desc *attr;
- struct attr_desc **all, **sorted;
+ class attr_desc *attr;
+ class attr_desc **all, **sorted;
char *handled;
for (i = 0; i < MAX_ATTRS_INDEX; i++)
for (attr = attrs[i]; attr; attr = attr->next)
num++;
- all = XNEWVEC (struct attr_desc *, num);
- sorted = XNEWVEC (struct attr_desc *, num);
+ all = XNEWVEC (class attr_desc *, num);
+ sorted = XNEWVEC (class attr_desc *, num);
handled = XCNEWVEC (char, num);
num = 0;
for (i = 0; i < MAX_ATTRS_INDEX; i++)
@@ -2858,7 +2858,7 @@ get_attr_order (struct attr_desc ***ret)
if (DEBUG)
for (j = 0; j < num; j++)
{
- struct attr_desc *attr2;
+ class attr_desc *attr2;
struct attr_value *av;
attr = sorted[j];
@@ -2889,14 +2889,14 @@ get_attr_order (struct attr_desc ***ret)
static void
optimize_attrs (int num_insn_codes)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value *av;
struct insn_ent *ie;
rtx newexp;
int i;
struct attr_value_list *ivbuf;
struct attr_value_list *iv;
- struct attr_desc **topsort;
+ class attr_desc **topsort;
int topnum;
/* For each insn code, make a list of all the insn_ent's for it,
@@ -3044,7 +3044,7 @@ clear_struct_flag (rtx x)
/* Add attribute value NAME to the beginning of ATTR's list. */
static void
-add_attr_value (struct attr_desc *attr, const char *name)
+add_attr_value (class attr_desc *attr, const char *name)
{
struct attr_value *av;
@@ -3064,7 +3064,7 @@ gen_attr (md_rtx_info *info)
{
struct enum_type *et;
struct enum_value *ev;
- struct attr_desc *attr;
+ class attr_desc *attr;
const char *name_ptr;
char *p;
rtx def = info->def;
@@ -3195,10 +3195,10 @@ compares_alternatives_p (rtx exp)
static void
gen_insn (md_rtx_info *info)
{
- struct insn_def *id;
+ class insn_def *id;
rtx def = info->def;
- id = oballoc (struct insn_def);
+ id = oballoc (class insn_def);
id->next = defs;
defs = id;
id->def = def;
@@ -3243,7 +3243,7 @@ gen_insn (md_rtx_info *info)
static void
gen_delay (md_rtx_info *info)
{
- struct delay_desc *delay;
+ class delay_desc *delay;
int i;
rtx def = info->def;
@@ -3262,7 +3262,7 @@ gen_delay (md_rtx_info *info)
have_annul_false = 1;
}
- delay = oballoc (struct delay_desc);
+ delay = oballoc (class delay_desc);
delay->def = def;
delay->num = ++num_delays;
delay->next = delays;
@@ -3289,7 +3289,7 @@ find_attrs_to_cache (rtx exp, bool create)
{
int i;
const char *name;
- struct attr_desc *attr;
+ class attr_desc *attr;
if (exp == NULL)
return;
@@ -3369,7 +3369,7 @@ write_test_expr (FILE *outf, rtx exp, unsigned int attrs_cached, int flags,
{
int comparison_operator = 0;
RTX_CODE code;
- struct attr_desc *attr;
+ class attr_desc *attr;
if (emit_parens)
fprintf (outf, "(");
@@ -4042,7 +4042,7 @@ walk_attr_value (rtx exp)
/* Write out a function to obtain the attribute for a given INSN. */
static void
-write_attr_get (FILE *outf, struct attr_desc *attr)
+write_attr_get (FILE *outf, class attr_desc *attr)
{
struct attr_value *av, *common_av;
int i, j;
@@ -4099,7 +4099,7 @@ write_attr_get (FILE *outf, struct attr_desc *attr)
if ((attrs_seen_more_than_once & (1U << i)) != 0)
{
const char *name = cached_attrs[i];
- struct attr_desc *cached_attr;
+ class attr_desc *cached_attr;
if (i != j)
cached_attrs[j] = name;
cached_attr = find_attr (&name, 0);
@@ -4163,7 +4163,7 @@ eliminate_known_true (rtx known_true, rtx exp, int insn_code, int insn_index)
and ";"). */
static void
-write_attr_set (FILE *outf, struct attr_desc *attr, int indent, rtx value,
+write_attr_set (FILE *outf, class attr_desc *attr, int indent, rtx value,
const char *prefix, const char *suffix, rtx known_true,
int insn_code, int insn_index, unsigned int attrs_cached)
{
@@ -4291,7 +4291,7 @@ write_insn_cases (FILE *outf, struct insn_ent *ie, int indent)
/* Write out the computation for one attribute value. */
static void
-write_attr_case (FILE *outf, struct attr_desc *attr, struct attr_value *av,
+write_attr_case (FILE *outf, class attr_desc *attr, struct attr_value *av,
int write_case_lines, const char *prefix, const char *suffix,
int indent, rtx known_true)
{
@@ -4355,7 +4355,7 @@ write_attr_case (FILE *outf, struct attr_desc *attr, struct attr_value *av,
/* Utilities to write in various forms. */
static void
-write_attr_valueq (FILE *outf, struct attr_desc *attr, const char *s)
+write_attr_valueq (FILE *outf, class attr_desc *attr, const char *s)
{
if (attr->is_numeric)
{
@@ -4375,7 +4375,7 @@ write_attr_valueq (FILE *outf, struct attr_desc *attr, const char *s)
}
static void
-write_attr_value (FILE *outf, struct attr_desc *attr, rtx value)
+write_attr_value (FILE *outf, class attr_desc *attr, rtx value)
{
int op;
@@ -4395,7 +4395,7 @@ write_attr_value (FILE *outf, struct attr_desc *attr, rtx value)
case ATTR:
{
- struct attr_desc *attr2 = find_attr (&XSTR (value, 0), 0);
+ class attr_desc *attr2 = find_attr (&XSTR (value, 0), 0);
if (attr->enum_name)
fprintf (outf, "(enum %s)", attr->enum_name);
else if (!attr->is_numeric)
@@ -4503,11 +4503,11 @@ write_dummy_eligible_delay (FILE *outf, const char *kind)
static void
write_eligible_delay (FILE *outf, const char *kind)
{
- struct delay_desc *delay;
+ class delay_desc *delay;
int max_slots;
char str[50];
const char *pstr;
- struct attr_desc *attr;
+ class attr_desc *attr;
struct attr_value *av, *common_av;
int i;
@@ -4639,14 +4639,14 @@ next_comma_elt (const char **pstr)
return attr_string (start, *pstr - start);
}
-/* Return a `struct attr_desc' pointer for a given named attribute. If CREATE
+/* Return a `class attr_desc' pointer for a given named attribute. If CREATE
is nonzero, build a new attribute, if one does not exist. *NAME_P is
replaced by a pointer to a canonical copy of the string. */
-static struct attr_desc *
+static class attr_desc *
find_attr (const char **name_p, int create)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
int index;
const char *name = *name_p;
@@ -4671,7 +4671,7 @@ find_attr (const char **name_p, int create)
if (! create)
return NULL;
- attr = oballoc (struct attr_desc);
+ attr = oballoc (class attr_desc);
attr->name = DEF_ATTR_STRING (name);
attr->enum_name = 0;
attr->first_value = attr->default_val = NULL;
@@ -4689,7 +4689,7 @@ find_attr (const char **name_p, int create)
static void
make_internal_attr (const char *name, rtx value, int special)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
attr = find_attr (&name, 1);
gcc_assert (!attr->default_val);
@@ -4704,7 +4704,7 @@ make_internal_attr (const char *name, rtx value, int special)
/* Find the most used value of an attribute. */
static struct attr_value *
-find_most_used (struct attr_desc *attr)
+find_most_used (class attr_desc *attr)
{
struct attr_value *av;
struct attr_value *most_used;
@@ -4759,7 +4759,7 @@ copy_rtx_unchanging (rtx orig)
static void
write_const_num_delay_slots (FILE *outf)
{
- struct attr_desc *attr = find_attr (&num_delay_slots_str, 0);
+ class attr_desc *attr = find_attr (&num_delay_slots_str, 0);
struct attr_value *av;
if (attr)
@@ -4815,7 +4815,7 @@ gen_insn_reserv (md_rtx_info *info)
struct insn_reserv *decl = oballoc (struct insn_reserv);
rtx def = info->def;
- struct attr_desc attr = { };
+ class attr_desc attr = { };
attr.name = DEF_ATTR_STRING (XSTR (def, 0));
attr.loc = info->loc;
@@ -4932,10 +4932,10 @@ check_tune_attr (const char *name, rtx exp)
/* Try to find a const attribute (usually cpu or tune) that is used
in all define_insn_reservation conditions. */
-static struct attr_desc *
+static class attr_desc *
find_tune_attr (rtx exp)
{
- struct attr_desc *attr;
+ class attr_desc *attr;
switch (GET_CODE (exp))
{
@@ -4979,7 +4979,7 @@ make_automaton_attrs (void)
int i;
struct insn_reserv *decl;
rtx code_exp, lats_exp, byps_exp;
- struct attr_desc *tune_attr;
+ class attr_desc *tune_attr;
if (n_insn_reservs == 0)
return;
@@ -5245,8 +5245,8 @@ handle_arg (const char *arg)
int
main (int argc, const char **argv)
{
- struct attr_desc *attr;
- struct insn_def *id;
+ class attr_desc *attr;
+ class insn_def *id;
int i;
progname = "genattrtab";
diff --git a/gcc/genextract.c b/gcc/genextract.c
index 66886f6fc91..5dff6830c1e 100644
--- a/gcc/genextract.c
+++ b/gcc/genextract.c
@@ -80,7 +80,7 @@ public:
};
/* Forward declarations. */
-static void walk_rtx (md_rtx_info *, rtx, struct accum_extract *);
+static void walk_rtx (md_rtx_info *, rtx, class accum_extract *);
#define UPPER_OFFSET ('A' - ('z' - 'a' + 1))
@@ -89,7 +89,7 @@ static void walk_rtx (md_rtx_info *, rtx, struct accum_extract *);
in ACC. */
static void
push_pathstr_operand (int operand, bool is_vector,
- struct accum_extract *acc)
+ class accum_extract *acc)
{
if (is_vector && 'a' + operand > 'z')
acc->pathstr.safe_push (operand + UPPER_OFFSET);
@@ -106,7 +106,7 @@ gen_insn (md_rtx_info *info)
unsigned int op_count, dup_count, j;
struct extraction *p;
struct code_ptr *link;
- struct accum_extract acc;
+ class accum_extract acc;
/* Walk the insn's pattern, remembering at all times the path
down to the walking point. */
@@ -224,7 +224,7 @@ VEC_char_to_string (vec<char> v)
}
static void
-walk_rtx (md_rtx_info *info, rtx x, struct accum_extract *acc)
+walk_rtx (md_rtx_info *info, rtx x, class accum_extract *acc)
{
RTX_CODE code;
int i, len;
diff --git a/gcc/genmatch.c b/gcc/genmatch.c
index 7403ccb7482..2e7bf27eeda 100644
--- a/gcc/genmatch.c
+++ b/gcc/genmatch.c
@@ -50,7 +50,7 @@ unsigned verbose;
/* libccp helpers. */
-static struct line_maps *line_table;
+static class line_maps *line_table;
/* The rich_location class within libcpp requires a way to expand
location_t instances, and relies on the client code
@@ -416,7 +416,7 @@ public:
unsigned int fn;
};
-struct simplify;
+class simplify;
/* Identifier that maps to a user-defined predicate. */
@@ -665,8 +665,8 @@ typedef hash_map<nofree_string_hash, unsigned> cid_map_t;
/* The AST produced by parsing of the pattern definitions. */
-struct dt_operand;
-struct capture_info;
+class dt_operand;
+class capture_info;
/* The base class for operands. */
@@ -880,7 +880,7 @@ public:
produced when the pattern applies in the leafs.
For a (match ...) the leafs are either empty if it is a simple predicate
or the single expression specifying the matched operands. */
- struct operand *result;
+ class operand *result;
/* Collected 'for' expression operators that have to be replaced
in the lowering phase. */
vec<vec<user_id *> > for_vec;
@@ -933,7 +933,7 @@ print_operand (operand *o, FILE *f = stderr, bool flattened = false)
}
DEBUG_FUNCTION void
-print_matches (struct simplify *s, FILE *f = stderr)
+print_matches (class simplify *s, FILE *f = stderr)
{
fprintf (f, "for expression: ");
print_operand (s->match, f);
@@ -1583,7 +1583,7 @@ lower (vec<simplify *>& simplifiers, bool gimple)
matching code. It represents the 'match' expression of all
simplifies and has those as its leafs. */
-struct dt_simplify;
+class dt_simplify;
/* A hash-map collecting semantically equivalent leafs in the decision
tree for splitting out to separate functions. */
@@ -1719,7 +1719,7 @@ class decision_tree
public:
dt_node *root;
- void insert (struct simplify *, unsigned);
+ void insert (class simplify *, unsigned);
void gen (FILE *f, bool gimple);
void print (FILE *f = stderr);
@@ -2025,7 +2025,7 @@ at_assert_elm:
/* Insert S into the decision tree. */
void
-decision_tree::insert (struct simplify *s, unsigned pattern_no)
+decision_tree::insert (class simplify *s, unsigned pattern_no)
{
current_id = s->id;
dt_operand **indexes = XCNEWVEC (dt_operand *, s->capture_max + 1);
@@ -4190,7 +4190,7 @@ parser::parse_operation ()
/* Parse a capture.
capture = '@'<number> */
-struct operand *
+class operand *
parser::parse_capture (operand *op, bool require_existing)
{
location_t src_loc = eat_token (CPP_ATSIGN)->src_loc;
@@ -4227,7 +4227,7 @@ parser::parse_capture (operand *op, bool require_existing)
/* Parse an expression
expr = '(' <operation>[capture][flag][type] <operand>... ')' */
-struct operand *
+class operand *
parser::parse_expr ()
{
const cpp_token *token = peek ();
@@ -4395,11 +4395,11 @@ parser::parse_c_expr (cpp_ttype start)
a standalone capture.
op = predicate | expr | c_expr | capture */
-struct operand *
+class operand *
parser::parse_op ()
{
const cpp_token *token = peek ();
- struct operand *op = NULL;
+ class operand *op = NULL;
if (token->type == CPP_OPEN_PAREN)
{
eat_token (CPP_OPEN_PAREN);
@@ -4618,7 +4618,7 @@ parser::parse_simplify (simplify::simplify_kind kind,
const cpp_token *loc = peek ();
parsing_match_operand = true;
- struct operand *match = parse_op ();
+ class operand *match = parse_op ();
finish_match_operand (match);
parsing_match_operand = false;
if (match->type == operand::OP_CAPTURE && !matcher)
@@ -5090,7 +5090,7 @@ main (int argc, char **argv)
}
}
- line_table = XCNEW (struct line_maps);
+ line_table = XCNEW (class line_maps);
linemap_init (line_table, 0);
line_table->reallocator = xrealloc;
line_table->round_alloc_size = round_alloc_size;
diff --git a/gcc/genoutput.c b/gcc/genoutput.c
index ab4c46a51f1..03fa48286a2 100644
--- a/gcc/genoutput.c
+++ b/gcc/genoutput.c
@@ -146,7 +146,7 @@ static struct operand_data **odata_end = &null_operand.next;
class data
{
public:
- struct data *next;
+ class data *next;
const char *name;
const char *template_code;
file_location loc;
@@ -161,29 +161,29 @@ public:
};
/* This variable points to the first link in the insn chain. */
-static struct data *idata;
+static class data *idata;
/* This variable points to the end of the insn chain. This is where
everything relevant from the machine description is appended. */
-static struct data **idata_end;
+static class data **idata_end;
static void output_prologue (void);
static void output_operand_data (void);
static void output_insn_data (void);
static void output_get_insn_name (void);
-static void scan_operands (struct data *, rtx, int, int);
+static void scan_operands (class data *, rtx, int, int);
static int compare_operands (struct operand_data *,
struct operand_data *);
-static void place_operands (struct data *);
-static void process_template (struct data *, const char *);
-static void validate_insn_alternatives (struct data *);
-static void validate_insn_operands (struct data *);
+static void place_operands (class data *);
+static void process_template (class data *, const char *);
+static void validate_insn_alternatives (class data *);
+static void validate_insn_operands (class data *);
class constraint_data
{
public:
- struct constraint_data *next_this_letter;
+ class constraint_data *next_this_letter;
file_location loc;
unsigned int namelen;
char name[1];
@@ -193,7 +193,7 @@ public:
are handled outside the define*_constraint mechanism. */
static const char indep_constraints[] = ",=+%*?!^$#&g";
-static struct constraint_data *
+static class constraint_data *
constraints_by_letter_table[1 << CHAR_BIT];
static int mdep_constraint_len (const char *, file_location, int);
@@ -277,12 +277,12 @@ output_operand_data (void)
static void
output_insn_data (void)
{
- struct data *d;
+ class data *d;
int name_offset = 0;
int next_name_offset;
const char * last_name = 0;
const char * next_name = 0;
- struct data *n;
+ class data *n;
for (n = idata, next_name_offset = 1; n; n = n->next, next_name_offset++)
if (n->name)
@@ -423,7 +423,7 @@ output_get_insn_name (void)
THIS_STRICT_LOW is nonzero if the containing rtx was a STRICT_LOW_PART. */
static void
-scan_operands (struct data *d, rtx part, int this_address_p,
+scan_operands (class data *d, rtx part, int this_address_p,
int this_strict_low)
{
int i, j;
@@ -565,7 +565,7 @@ compare_operands (struct operand_data *d0, struct operand_data *d1)
find a subsequence that is the same, or allocate a new one at the end. */
static void
-place_operands (struct data *d)
+place_operands (class data *d)
{
struct operand_data *od, *od2;
int i;
@@ -619,7 +619,7 @@ place_operands (struct data *d)
templates, or C code to generate the assembler code template. */
static void
-process_template (struct data *d, const char *template_code)
+process_template (class data *d, const char *template_code)
{
const char *cp;
int i;
@@ -742,7 +742,7 @@ process_template (struct data *d, const char *template_code)
/* Check insn D for consistency in number of constraint alternatives. */
static void
-validate_insn_alternatives (struct data *d)
+validate_insn_alternatives (class data *d)
{
int n = 0, start;
@@ -825,7 +825,7 @@ validate_insn_alternatives (struct data *d)
/* Verify that there are no gaps in operand numbers for INSNs. */
static void
-validate_insn_operands (struct data *d)
+validate_insn_operands (class data *d)
{
int i;
@@ -835,7 +835,7 @@ validate_insn_operands (struct data *d)
}
static void
-validate_optab_operands (struct data *d)
+validate_optab_operands (class data *d)
{
if (!d->name || d->name[0] == '\0' || d->name[0] == '*')
return;
@@ -980,7 +980,7 @@ gen_expand (md_rtx_info *info)
static void
init_insn_for_nothing (void)
{
- idata = XCNEW (struct data);
+ idata = XCNEW (class data);
new (idata) data ();
idata->name = "*placeholder_for_nothing";
idata->loc = file_location ("<internal>", 0, 0);
@@ -1088,7 +1088,7 @@ note_constraint (md_rtx_info *info)
{
rtx exp = info->def;
const char *name = XSTR (exp, 0);
- struct constraint_data **iter, **slot, *new_cdata;
+ class constraint_data **iter, **slot, *new_cdata;
if (strcmp (name, "TARGET_MEM_CONSTRAINT") == 0)
name = general_mem;
@@ -1138,8 +1138,8 @@ note_constraint (md_rtx_info *info)
return;
}
}
- new_cdata = XNEWVAR (struct constraint_data,
- sizeof (struct constraint_data) + namelen);
+ new_cdata = XNEWVAR (class constraint_data,
+ sizeof (class constraint_data) + namelen);
new (new_cdata) constraint_data ();
strcpy (CONST_CAST (char *, new_cdata->name), name);
new_cdata->namelen = namelen;
@@ -1155,7 +1155,7 @@ note_constraint (md_rtx_info *info)
static int
mdep_constraint_len (const char *s, file_location loc, int opno)
{
- struct constraint_data *p;
+ class constraint_data *p;
p = constraints_by_letter_table[(unsigned int)s[0]];
diff --git a/gcc/genpreds.c b/gcc/genpreds.c
index 293930c695d..556c4bdd869 100644
--- a/gcc/genpreds.c
+++ b/gcc/genpreds.c
@@ -669,8 +669,8 @@ write_one_predicate_function (struct pred_data *p)
class constraint_data
{
public:
- struct constraint_data *next_this_letter;
- struct constraint_data *next_textual;
+ class constraint_data *next_this_letter;
+ class constraint_data *next_textual;
const char *name;
const char *c_name; /* same as .name unless mangling is necessary */
file_location loc; /* location of definition */
@@ -690,13 +690,13 @@ public:
/* Overview of all constraints beginning with a given letter. */
-static struct constraint_data *
+static class constraint_data *
constraints_by_letter_table[1<<CHAR_BIT];
/* For looking up all the constraints in the order that they appeared
in the machine description. */
-static struct constraint_data *first_constraint;
-static struct constraint_data **last_constraint_ptr = &first_constraint;
+static class constraint_data *first_constraint;
+static class constraint_data **last_constraint_ptr = &first_constraint;
#define FOR_ALL_CONSTRAINTS(iter_) \
for (iter_ = first_constraint; iter_; iter_ = iter_->next_textual)
@@ -775,7 +775,7 @@ add_constraint (const char *name, const char *regclass,
rtx exp, bool is_memory, bool is_special_memory,
bool is_address, file_location loc)
{
- struct constraint_data *c, **iter, **slot;
+ class constraint_data *c, **iter, **slot;
const char *p;
bool need_mangled_name = false;
bool is_const_int;
@@ -909,7 +909,7 @@ add_constraint (const char *name, const char *regclass,
}
- c = XOBNEW (rtl_obstack, struct constraint_data);
+ c = XOBNEW (rtl_obstack, class constraint_data);
c->name = name;
c->c_name = need_mangled_name ? mangle (name) : name;
c->loc = loc;
@@ -980,7 +980,7 @@ process_define_register_constraint (md_rtx_info *info)
static void
choose_enum_order (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
enum_order = XNEWVEC (const constraint_data *, num_constraints);
unsigned int next = 0;
@@ -1077,7 +1077,7 @@ write_lookup_constraint_1 (void)
for (i = 0; i < ARRAY_SIZE (constraints_by_letter_table); i++)
{
- struct constraint_data *c = constraints_by_letter_table[i];
+ class constraint_data *c = constraints_by_letter_table[i];
if (!c)
continue;
@@ -1117,7 +1117,7 @@ write_lookup_constraint_array (void)
{
if (i != 0)
printf (",\n ");
- struct constraint_data *c = constraints_by_letter_table[i];
+ class constraint_data *c = constraints_by_letter_table[i];
if (!c)
printf ("CONSTRAINT__UNKNOWN");
else if (c->namelen == 1)
@@ -1143,7 +1143,7 @@ write_insn_constraint_len (void)
for (i = 0; i < ARRAY_SIZE (constraints_by_letter_table); i++)
{
- struct constraint_data *c = constraints_by_letter_table[i];
+ class constraint_data *c = constraints_by_letter_table[i];
if (!c
|| c->namelen == 1)
@@ -1152,7 +1152,7 @@ write_insn_constraint_len (void)
/* Constraints with multiple characters should have the same
length. */
{
- struct constraint_data *c2 = c->next_this_letter;
+ class constraint_data *c2 = c->next_this_letter;
size_t len = c->namelen;
while (c2)
{
@@ -1178,7 +1178,7 @@ write_insn_constraint_len (void)
static void
write_reg_class_for_constraint_1 (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
puts ("enum reg_class\n"
"reg_class_for_constraint_1 (enum constraint_num c)\n"
@@ -1201,7 +1201,7 @@ write_reg_class_for_constraint_1 (void)
static void
write_tm_constrs_h (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
printf ("\
/* Generated automatically by the program '%s'\n\
@@ -1288,7 +1288,7 @@ write_constraint_satisfied_p_array (void)
static void
write_insn_const_int_ok_for_constraint (void)
{
- struct constraint_data *c;
+ class constraint_data *c;
puts ("bool\n"
"insn_const_int_ok_for_constraint (HOST_WIDE_INT ival, "
diff --git a/gcc/genrecog.c b/gcc/genrecog.c
index a3b8e683c63..f20089eeee8 100644
--- a/gcc/genrecog.c
+++ b/gcc/genrecog.c
@@ -950,7 +950,7 @@ list_head <T>::singleton () const
return first == last ? first : 0;
}
-struct state;
+class state;
/* Describes a possible successful return from a routine. */
struct acceptance_type
@@ -1499,7 +1499,7 @@ operator != (const int_set &a, const int_set &b)
return !operator == (a, b);
}
-struct decision;
+class decision;
/* Represents a transition between states, dependent on the result of
a test T. */
@@ -2244,7 +2244,7 @@ optimize_subroutine_group (const char *type, state *root)
st.longest_backtrack, st.longest_backtrack_code);
}
-struct merge_pattern_info;
+class merge_pattern_info;
/* Represents a transition from one pattern to another. */
class merge_pattern_transition
diff --git a/gcc/gensupport.c b/gcc/gensupport.c
index 07aa8391f28..0ad9995b642 100644
--- a/gcc/gensupport.c
+++ b/gcc/gensupport.c
@@ -70,55 +70,55 @@ class queue_elem
public:
rtx data;
file_location loc;
- struct queue_elem *next;
+ class queue_elem *next;
/* In a DEFINE_INSN that came from a DEFINE_INSN_AND_SPLIT or
DEFINE_INSN_AND_REWRITE, SPLIT points to the generated DEFINE_SPLIT. */
- struct queue_elem *split;
+ class queue_elem *split;
};
#define MNEMONIC_ATTR_NAME "mnemonic"
#define MNEMONIC_HTAB_SIZE 1024
-static struct queue_elem *define_attr_queue;
-static struct queue_elem **define_attr_tail = &define_attr_queue;
-static struct queue_elem *define_pred_queue;
-static struct queue_elem **define_pred_tail = &define_pred_queue;
-static struct queue_elem *define_insn_queue;
-static struct queue_elem **define_insn_tail = &define_insn_queue;
-static struct queue_elem *define_cond_exec_queue;
-static struct queue_elem **define_cond_exec_tail = &define_cond_exec_queue;
-static struct queue_elem *define_subst_queue;
-static struct queue_elem **define_subst_tail = &define_subst_queue;
-static struct queue_elem *other_queue;
-static struct queue_elem **other_tail = &other_queue;
-static struct queue_elem *define_subst_attr_queue;
-static struct queue_elem **define_subst_attr_tail = &define_subst_attr_queue;
+static class queue_elem *define_attr_queue;
+static class queue_elem **define_attr_tail = &define_attr_queue;
+static class queue_elem *define_pred_queue;
+static class queue_elem **define_pred_tail = &define_pred_queue;
+static class queue_elem *define_insn_queue;
+static class queue_elem **define_insn_tail = &define_insn_queue;
+static class queue_elem *define_cond_exec_queue;
+static class queue_elem **define_cond_exec_tail = &define_cond_exec_queue;
+static class queue_elem *define_subst_queue;
+static class queue_elem **define_subst_tail = &define_subst_queue;
+static class queue_elem *other_queue;
+static class queue_elem **other_tail = &other_queue;
+static class queue_elem *define_subst_attr_queue;
+static class queue_elem **define_subst_attr_tail = &define_subst_attr_queue;
/* Mapping from DEFINE_* rtxes to their location in the source file. */
static hash_map <rtx, file_location> *rtx_locs;
static void remove_constraints (rtx);
-static int is_predicable (struct queue_elem *);
+static int is_predicable (class queue_elem *);
static void identify_predicable_attribute (void);
static int n_alternatives (const char *);
static void collect_insn_data (rtx, int *, int *);
-static const char *alter_test_for_insn (struct queue_elem *,
- struct queue_elem *);
+static const char *alter_test_for_insn (class queue_elem *,
+ class queue_elem *);
static char *shift_output_template (char *, const char *, int);
-static const char *alter_output_for_insn (struct queue_elem *,
- struct queue_elem *,
+static const char *alter_output_for_insn (class queue_elem *,
+ class queue_elem *,
int, int);
-static void process_one_cond_exec (struct queue_elem *);
+static void process_one_cond_exec (class queue_elem *);
static void process_define_cond_exec (void);
static void init_predicate_table (void);
static void record_insn_name (int, const char *);
-static bool has_subst_attribute (struct queue_elem *, struct queue_elem *);
+static bool has_subst_attribute (class queue_elem *, class queue_elem *);
static const char * alter_output_for_subst_insn (rtx, int);
-static void alter_attrs_for_subst_insn (struct queue_elem *, int);
-static void process_substs_on_one_elem (struct queue_elem *,
- struct queue_elem *);
+static void alter_attrs_for_subst_insn (class queue_elem *, int);
+static void process_substs_on_one_elem (class queue_elem *,
+ class queue_elem *);
static rtx subst_dup (rtx, int, int);
static void process_define_subst (void);
@@ -400,11 +400,11 @@ process_define_predicate (rtx desc, file_location loc)
/* Queue PATTERN on LIST_TAIL. Return the address of the new queue
element. */
-static struct queue_elem *
-queue_pattern (rtx pattern, struct queue_elem ***list_tail,
+static class queue_elem *
+queue_pattern (rtx pattern, class queue_elem ***list_tail,
file_location loc)
{
- struct queue_elem *e = XNEW (struct queue_elem);
+ class queue_elem *e = XNEW (class queue_elem);
e->data = pattern;
e->loc = loc;
e->next = NULL;
@@ -416,9 +416,9 @@ queue_pattern (rtx pattern, struct queue_elem ***list_tail,
/* Remove element ELEM from QUEUE. */
static void
-remove_from_queue (struct queue_elem *elem, struct queue_elem **queue)
+remove_from_queue (class queue_elem *elem, class queue_elem **queue)
{
- struct queue_elem *prev, *e;
+ class queue_elem *prev, *e;
prev = NULL;
for (e = *queue; e ; e = e->next)
{
@@ -440,7 +440,7 @@ remove_from_queue (struct queue_elem *elem, struct queue_elem **queue)
static void
add_define_attr (const char *name)
{
- struct queue_elem *e = XNEW (struct queue_elem);
+ class queue_elem *e = XNEW (class queue_elem);
rtx t1 = rtx_alloc (DEFINE_ATTR);
XSTR (t1, 0) = name;
XSTR (t1, 1) = "no,yes";
@@ -591,8 +591,8 @@ process_rtx (rtx desc, file_location loc)
rtx split;
rtvec attr;
int i;
- struct queue_elem *insn_elem;
- struct queue_elem *split_elem;
+ class queue_elem *insn_elem;
+ class queue_elem *split_elem;
int split_code = (GET_CODE (desc) == DEFINE_INSN_AND_REWRITE ? 5 : 6);
/* Create a split with values from the insn_and_split. */
@@ -646,7 +646,7 @@ process_rtx (rtx desc, file_location loc)
a DEFINE_INSN. */
static int
-is_predicable (struct queue_elem *elem)
+is_predicable (class queue_elem *elem)
{
rtvec vec = XVEC (elem->data, 4);
const char *value;
@@ -716,8 +716,8 @@ is_predicable (struct queue_elem *elem)
/* Find attribute SUBST in ELEM and assign NEW_VALUE to it. */
static void
-change_subst_attribute (struct queue_elem *elem,
- struct queue_elem *subst_elem,
+change_subst_attribute (class queue_elem *elem,
+ class queue_elem *subst_elem,
const char *new_value)
{
rtvec attrs_vec = XVEC (elem->data, 4);
@@ -746,7 +746,7 @@ change_subst_attribute (struct queue_elem *elem,
words, we suppose the default value of the attribute to be 'no' since it is
always generated automatically in read-rtl.c. */
static bool
-has_subst_attribute (struct queue_elem *elem, struct queue_elem *subst_elem)
+has_subst_attribute (class queue_elem *elem, class queue_elem *subst_elem)
{
rtvec attrs_vec = XVEC (elem->data, 4);
const char *value, *subst_name = XSTR (subst_elem->data, 0);
@@ -979,7 +979,7 @@ subst_pattern_match (rtx x, rtx pt, file_location loc)
static void
identify_predicable_attribute (void)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
char *p_true, *p_false;
const char *value;
@@ -1327,8 +1327,8 @@ alter_constraints (rtx pattern, int n_dup, constraints_handler_t alter)
}
static const char *
-alter_test_for_insn (struct queue_elem *ce_elem,
- struct queue_elem *insn_elem)
+alter_test_for_insn (class queue_elem *ce_elem,
+ class queue_elem *insn_elem)
{
return rtx_reader_ptr->join_c_conditions (XSTR (ce_elem->data, 1),
XSTR (insn_elem->data, 2));
@@ -1439,7 +1439,7 @@ alter_attrs_for_insn (rtx insn)
if (!global_changes_made)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
global_changes_made = true;
add_define_attr ("ce_enabled");
@@ -1480,7 +1480,7 @@ alter_attrs_for_insn (rtx insn)
ELEM is a queue element, containing our rtl-template,
N_DUP - multiplication factor. */
static void
-alter_attrs_for_subst_insn (struct queue_elem * elem, int n_dup)
+alter_attrs_for_subst_insn (class queue_elem * elem, int n_dup)
{
rtvec vec = XVEC (elem->data, 4);
int num_elem;
@@ -1543,8 +1543,8 @@ shift_output_template (char *dest, const char *src, int disp)
}
static const char *
-alter_output_for_insn (struct queue_elem *ce_elem,
- struct queue_elem *insn_elem,
+alter_output_for_insn (class queue_elem *ce_elem,
+ class queue_elem *insn_elem,
int alt, int max_op)
{
const char *ce_out, *insn_out;
@@ -1732,9 +1732,9 @@ alter_output_for_subst_insn (rtx insn, int alt)
/* Replicate insns as appropriate for the given DEFINE_COND_EXEC. */
static void
-process_one_cond_exec (struct queue_elem *ce_elem)
+process_one_cond_exec (class queue_elem *ce_elem)
{
- struct queue_elem *insn_elem;
+ class queue_elem *insn_elem;
for (insn_elem = define_insn_queue; insn_elem ; insn_elem = insn_elem->next)
{
int alternatives, max_operand;
@@ -1838,10 +1838,10 @@ process_one_cond_exec (struct queue_elem *ce_elem)
was applied, ELEM would be deleted. */
static void
-process_substs_on_one_elem (struct queue_elem *elem,
- struct queue_elem *queue)
+process_substs_on_one_elem (class queue_elem *elem,
+ class queue_elem *queue)
{
- struct queue_elem *subst_elem;
+ class queue_elem *subst_elem;
int i, j, patterns_match;
for (subst_elem = define_subst_queue;
@@ -2248,7 +2248,7 @@ subst_dup (rtx pattern, int n_alt, int n_subst_alt)
static void
process_define_cond_exec (void)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
identify_predicable_attribute ();
if (have_error)
@@ -2264,7 +2264,7 @@ process_define_cond_exec (void)
static void
process_define_subst (void)
{
- struct queue_elem *elem, *elem_attr;
+ class queue_elem *elem, *elem_attr;
/* Check if each define_subst has corresponding define_subst_attr. */
for (elem = define_subst_queue; elem ; elem = elem->next)
@@ -2475,7 +2475,7 @@ mnemonic_htab_callback (void **slot, void *info ATTRIBUTE_UNUSED)
static void
gen_mnemonic_attr (void)
{
- struct queue_elem *elem;
+ class queue_elem *elem;
rtx mnemonic_attr = NULL;
htab_t mnemonic_htab;
const char *str, *p;
@@ -2552,7 +2552,7 @@ gen_mnemonic_attr (void)
static void
check_define_attr_duplicates ()
{
- struct queue_elem *elem;
+ class queue_elem *elem;
htab_t attr_htab;
char * attr_name;
void **slot;
@@ -2648,7 +2648,7 @@ read_md_rtx (md_rtx_info *info)
to use elided pattern numbers for anything. */
do
{
- struct queue_elem **queue, *elem;
+ class queue_elem **queue, *elem;
/* Read all patterns from a given queue before moving on to the next. */
if (define_attr_queue != NULL)
diff --git a/gcc/ggc-page.c b/gcc/ggc-page.c
index a95ff466704..a2736bc1dfa 100644
--- a/gcc/ggc-page.c
+++ b/gcc/ggc-page.c
@@ -200,7 +200,7 @@ static const size_t extra_order_size_table[] = {
sizeof (struct function),
sizeof (struct basic_block_def),
sizeof (struct cgraph_node),
- sizeof (struct loop),
+ sizeof (class loop),
};
/* The total number of orders. */
diff --git a/gcc/gimple-loop-interchange.cc b/gcc/gimple-loop-interchange.cc
index 1324489e85a..b56155b1fef 100644
--- a/gcc/gimple-loop-interchange.cc
+++ b/gcc/gimple-loop-interchange.cc
@@ -159,7 +159,7 @@ dump_reduction (reduction_p re)
/* Dump LOOP's induction IV. */
static void
-dump_induction (struct loop *loop, induction_p iv)
+dump_induction (class loop *loop, induction_p iv)
{
fprintf (dump_file, " Induction: ");
print_generic_expr (dump_file, iv->var, TDF_SLIM);
@@ -175,7 +175,7 @@ dump_induction (struct loop *loop, induction_p iv)
class loop_cand
{
public:
- loop_cand (struct loop *, struct loop *);
+ loop_cand (class loop *, class loop *);
~loop_cand ();
reduction_p find_reduction_by_stmt (gimple *);
@@ -189,10 +189,10 @@ public:
void undo_simple_reduction (reduction_p, bitmap);
/* The loop itself. */
- struct loop *m_loop;
+ class loop *m_loop;
/* The outer loop for interchange. It equals to loop if this loop cand
itself represents the outer loop. */
- struct loop *m_outer;
+ class loop *m_outer;
/* Vector of induction variables in loop. */
vec<induction_p> m_inductions;
/* Vector of reduction variables in loop. */
@@ -211,7 +211,7 @@ public:
/* Constructor. */
-loop_cand::loop_cand (struct loop *loop, struct loop *outer)
+loop_cand::loop_cand (class loop *loop, class loop *outer)
: m_loop (loop), m_outer (outer), m_exit (single_exit (loop)),
m_bbs (get_loop_body (loop)), m_num_stmts (0), m_const_init_reduc (0)
{
@@ -241,7 +241,7 @@ loop_cand::~loop_cand ()
/* Return single use stmt of VAR in LOOP, otherwise return NULL. */
static gimple *
-single_use_in_loop (tree var, struct loop *loop)
+single_use_in_loop (tree var, class loop *loop)
{
gimple *stmt, *res = NULL;
use_operand_p use_p;
@@ -951,7 +951,7 @@ free_data_refs_with_aux (vec<data_reference_p> datarefs)
class tree_loop_interchange
{
public:
- tree_loop_interchange (vec<struct loop *> loop_nest)
+ tree_loop_interchange (vec<class loop *> loop_nest)
: m_loop_nest (loop_nest), m_niters_iv_var (NULL_TREE),
m_dce_seeds (BITMAP_ALLOC (NULL)) { }
~tree_loop_interchange () { BITMAP_FREE (m_dce_seeds); }
@@ -962,10 +962,10 @@ private:
bool valid_data_dependences (unsigned, unsigned, vec<ddr_p>);
void interchange_loops (loop_cand &, loop_cand &);
void map_inductions_to_loop (loop_cand &, loop_cand &);
- void move_code_to_inner_loop (struct loop *, struct loop *, basic_block *);
+ void move_code_to_inner_loop (class loop *, class loop *, basic_block *);
/* The whole loop nest in which interchange is ongoing. */
- vec<struct loop *> m_loop_nest;
+ vec<class loop *> m_loop_nest;
/* We create new IV which is only used in loop's exit condition check.
In case of 3-level loop nest interchange, when we interchange the
innermost two loops, new IV created in the middle level loop does
@@ -1079,7 +1079,7 @@ tree_loop_interchange::interchange_loops (loop_cand &iloop, loop_cand &oloop)
}
/* Prepare niters for both loops. */
- struct loop *loop_nest = m_loop_nest[0];
+ class loop *loop_nest = m_loop_nest[0];
edge instantiate_below = loop_preheader_edge (loop_nest);
gsi = gsi_last_bb (loop_preheader_edge (loop_nest)->src);
i_niters = number_of_latch_executions (iloop.m_loop);
@@ -1214,8 +1214,8 @@ tree_loop_interchange::map_inductions_to_loop (loop_cand &src, loop_cand &tgt)
/* Move stmts of outer loop to inner loop. */
void
-tree_loop_interchange::move_code_to_inner_loop (struct loop *outer,
- struct loop *inner,
+tree_loop_interchange::move_code_to_inner_loop (class loop *outer,
+ class loop *inner,
basic_block *outer_bbs)
{
basic_block oloop_exit_bb = single_exit (outer)->src;
@@ -1276,7 +1276,7 @@ tree_loop_interchange::move_code_to_inner_loop (struct loop *outer,
arr[i][j - 1][k] = 0; */
static void
-compute_access_stride (struct loop *loop_nest, struct loop *loop,
+compute_access_stride (class loop *loop_nest, class loop *loop,
data_reference_p dr)
{
vec<tree> *strides = new vec<tree> ();
@@ -1320,10 +1320,10 @@ compute_access_stride (struct loop *loop_nest, struct loop *loop,
if (! chrec_contains_undetermined (scev))
{
tree sl = scev;
- struct loop *expected = loop;
+ class loop *expected = loop;
while (TREE_CODE (sl) == POLYNOMIAL_CHREC)
{
- struct loop *sl_loop = get_chrec_loop (sl);
+ class loop *sl_loop = get_chrec_loop (sl);
while (sl_loop != expected)
{
strides->safe_push (size_int (0));
@@ -1351,8 +1351,8 @@ compute_access_stride (struct loop *loop_nest, struct loop *loop,
all data references. If access strides cannot be computed at least
for two levels of loop for any data reference, it returns NULL. */
-static struct loop *
-compute_access_strides (struct loop *loop_nest, struct loop *loop,
+static class loop *
+compute_access_strides (class loop *loop_nest, class loop *loop,
vec<data_reference_p> datarefs)
{
unsigned i, j, num_loops = (unsigned) -1;
@@ -1390,8 +1390,8 @@ compute_access_strides (struct loop *loop_nest, struct loop *loop,
of loops that isn't in current LOOP_NEST. */
static void
-prune_access_strides_not_in_loop (struct loop *loop_nest,
- struct loop *innermost,
+prune_access_strides_not_in_loop (class loop *loop_nest,
+ class loop *innermost,
vec<data_reference_p> datarefs)
{
data_reference_p dr;
@@ -1712,7 +1712,7 @@ public:
nest with LOOP. */
static bool
-proper_loop_form_for_interchange (struct loop *loop, struct loop **min_outer)
+proper_loop_form_for_interchange (class loop *loop, class loop **min_outer)
{
edge e0, e1, exit;
@@ -1811,14 +1811,14 @@ proper_loop_form_for_interchange (struct loop *loop, struct loop **min_outer)
should be interchanged by looking into all DATAREFS. */
static bool
-should_interchange_loop_nest (struct loop *loop_nest, struct loop *innermost,
+should_interchange_loop_nest (class loop *loop_nest, class loop *innermost,
vec<data_reference_p> datarefs)
{
unsigned idx = loop_depth (innermost) - loop_depth (loop_nest);
gcc_assert (idx > 0);
/* Check if any two adjacent loops should be interchanged. */
- for (struct loop *loop = innermost;
+ for (class loop *loop = innermost;
loop != loop_nest; loop = loop_outer (loop), idx--)
if (should_interchange_loops (idx, idx - 1, datarefs, 0, 0,
loop == innermost, false))
@@ -1838,7 +1838,7 @@ tree_loop_interchange_compute_ddrs (vec<loop_p> loop_nest,
vec<ddr_p> *ddrs)
{
struct data_reference *a, *b;
- struct loop *innermost = loop_nest.last ();
+ class loop *innermost = loop_nest.last ();
for (unsigned i = 0; datarefs.iterate (i, &a); ++i)
{
@@ -1880,7 +1880,7 @@ tree_loop_interchange_compute_ddrs (vec<loop_p> loop_nest,
/* Prune DATAREFS by removing any data reference not inside of LOOP. */
static inline void
-prune_datarefs_not_in_loop (struct loop *loop, vec<data_reference_p> datarefs)
+prune_datarefs_not_in_loop (class loop *loop, vec<data_reference_p> datarefs)
{
unsigned i, j;
struct data_reference *dr;
@@ -1907,10 +1907,10 @@ prune_datarefs_not_in_loop (struct loop *loop, vec<data_reference_p> datarefs)
inner loop of that basic block's father loop. On success, return the
outer loop of the result loop nest. */
-static struct loop *
-prepare_data_references (struct loop *loop, vec<data_reference_p> *datarefs)
+static class loop *
+prepare_data_references (class loop *loop, vec<data_reference_p> *datarefs)
{
- struct loop *loop_nest = loop;
+ class loop *loop_nest = loop;
vec<data_reference_p> *bb_refs;
basic_block bb, *bbs = get_loop_body_in_dom_order (loop);
@@ -1974,11 +1974,11 @@ prepare_data_references (struct loop *loop, vec<data_reference_p> *datarefs)
in interchange. */
static bool
-prepare_perfect_loop_nest (struct loop *loop, vec<loop_p> *loop_nest,
+prepare_perfect_loop_nest (class loop *loop, vec<loop_p> *loop_nest,
vec<data_reference_p> *datarefs, vec<ddr_p> *ddrs)
{
- struct loop *start_loop = NULL, *innermost = loop;
- struct loop *outermost = loops_for_fn (cfun)->tree_root;
+ class loop *start_loop = NULL, *innermost = loop;
+ class loop *outermost = loops_for_fn (cfun)->tree_root;
/* Find loop nest from the innermost loop. The outermost is the innermost
outer*/
@@ -2064,7 +2064,7 @@ pass_linterchange::execute (function *fun)
return 0;
bool changed_p = false;
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
vec<loop_p> loop_nest = vNULL;
diff --git a/gcc/gimple-loop-jam.c b/gcc/gimple-loop-jam.c
index 90ddbf37b07..11153f54025 100644
--- a/gcc/gimple-loop-jam.c
+++ b/gcc/gimple-loop-jam.c
@@ -103,11 +103,11 @@ along with GCC; see the file COPYING3. If not see
to the OLD loop or the outer loop of OLD now is inside LOOP. */
static void
-merge_loop_tree (struct loop *loop, struct loop *old)
+merge_loop_tree (class loop *loop, class loop *old)
{
basic_block *bbs;
int i, n;
- struct loop *subloop;
+ class loop *subloop;
edge e;
edge_iterator ei;
@@ -186,11 +186,11 @@ bb_prevents_fusion_p (basic_block bb)
If so return true, otherwise return false. */
static bool
-unroll_jam_possible_p (struct loop *outer, struct loop *loop)
+unroll_jam_possible_p (class loop *outer, class loop *loop)
{
basic_block *bbs;
int i, n;
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
/* When fusing the loops we skip the latch block
of the first one, so it mustn't have any effects to
@@ -301,9 +301,9 @@ unroll_jam_possible_p (struct loop *outer, struct loop *loop)
be in appropriate form. */
static void
-fuse_loops (struct loop *loop)
+fuse_loops (class loop *loop)
{
- struct loop *next = loop->next;
+ class loop *next = loop->next;
while (next)
{
@@ -353,7 +353,7 @@ fuse_loops (struct loop *loop)
merge_loop_tree (loop, next);
gcc_assert (!next->num_nodes);
- struct loop *ln = next->next;
+ class loop *ln = next->next;
delete_loop (next);
next = ln;
}
@@ -422,7 +422,7 @@ adjust_unroll_factor (struct data_dependence_relation *ddr,
static unsigned int
tree_loop_unroll_and_jam (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
gcc_assert (scev_initialized_p ());
@@ -430,7 +430,7 @@ tree_loop_unroll_and_jam (void)
/* Go through all innermost loops. */
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
{
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
if (loop_depth (loop) < 2
|| optimize_loop_nest_for_size_p (outer))
@@ -442,7 +442,7 @@ tree_loop_unroll_and_jam (void)
vec<data_reference_p> datarefs;
vec<ddr_p> dependences;
unsigned unroll_factor, profit_unroll, removed;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unroll = false;
auto_vec<loop_p, 3> loop_nest;
diff --git a/gcc/gimple-loop-versioning.cc b/gcc/gimple-loop-versioning.cc
index 468f5860d69..be8c2d89cfd 100644
--- a/gcc/gimple-loop-versioning.cc
+++ b/gcc/gimple-loop-versioning.cc
@@ -190,7 +190,7 @@ public:
/* The loop containing STMT (cached for convenience). If multiple
statements share the same address, they all belong to this loop. */
- struct loop *loop;
+ class loop *loop;
/* A decomposition of the calculation into a sum of terms plus an
optional base. When BASE is provided, it is never an SSA name.
@@ -229,7 +229,7 @@ public:
/* The outermost loop that can handle all the version checks
described below. */
- struct loop *outermost;
+ class loop *outermost;
/* The first entry in the list of blocks that belong to this loop
(and not to subloops). m_next_block_in_loop provides the chain
@@ -242,7 +242,7 @@ public:
/* If versioning succeeds, this points the version of the loop that
assumes the version conditions holds. */
- struct loop *optimized_loop;
+ class loop *optimized_loop;
};
/* The main pass structure. */
@@ -285,9 +285,9 @@ private:
loop_info &m_li;
};
- loop_info &get_loop_info (struct loop *loop) { return m_loops[loop->num]; }
+ loop_info &get_loop_info (class loop *loop) { return m_loops[loop->num]; }
- unsigned int max_insns_for_loop (struct loop *);
+ unsigned int max_insns_for_loop (class loop *);
bool expensive_stmt_p (gimple *);
void version_for_unity (gimple *, tree);
@@ -298,7 +298,7 @@ private:
inner_likelihood get_inner_likelihood (tree, unsigned HOST_WIDE_INT);
void dump_inner_likelihood (address_info &, address_term_info &);
void analyze_stride (address_info &, address_term_info &,
- tree, struct loop *);
+ tree, class loop *);
bool find_per_loop_multiplication (address_info &, address_term_info &);
bool analyze_term_using_scevs (address_info &, address_term_info &);
void analyze_arbitrary_term (address_info &, address_term_info &);
@@ -309,15 +309,15 @@ private:
bool analyze_block (basic_block);
bool analyze_blocks ();
- void prune_loop_conditions (struct loop *, vr_values *);
+ void prune_loop_conditions (class loop *, vr_values *);
bool prune_conditions ();
- void merge_loop_info (struct loop *, struct loop *);
- void add_loop_to_queue (struct loop *);
- bool decide_whether_loop_is_versionable (struct loop *);
+ void merge_loop_info (class loop *, class loop *);
+ void add_loop_to_queue (class loop *);
+ bool decide_whether_loop_is_versionable (class loop *);
bool make_versioning_decisions ();
- bool version_loop (struct loop *);
+ bool version_loop (class loop *);
void implement_versioning_decisions ();
/* The function we're optimizing. */
@@ -348,7 +348,7 @@ private:
auto_vec<basic_block> m_next_block_in_loop;
/* The list of loops that we've decided to version. */
- auto_vec<struct loop *> m_loops_to_version;
+ auto_vec<class loop *> m_loops_to_version;
/* A table of addresses in the current loop, keyed off their values
but not their offsets. */
@@ -602,7 +602,7 @@ loop_versioning::~loop_versioning ()
interchange or outer-loop vectorization). */
unsigned int
-loop_versioning::max_insns_for_loop (struct loop *loop)
+loop_versioning::max_insns_for_loop (class loop *loop)
{
return (loop->inner
? PARAM_VALUE (PARAM_LOOP_VERSIONING_MAX_OUTER_INSNS)
@@ -633,7 +633,7 @@ loop_versioning::expensive_stmt_p (gimple *stmt)
void
loop_versioning::version_for_unity (gimple *stmt, tree name)
{
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
loop_info &li = get_loop_info (loop);
if (bitmap_set_bit (&li.unity_names, SSA_NAME_VERSION (name)))
@@ -641,7 +641,7 @@ loop_versioning::version_for_unity (gimple *stmt, tree name)
/* This is the first time we've wanted to version LOOP for NAME.
Keep track of the outermost loop that can handle all versioning
checks in LI. */
- struct loop *outermost
+ class loop *outermost
= outermost_invariant_loop_for_expr (loop, name);
if (loop_depth (li.outermost) < loop_depth (outermost))
li.outermost = outermost;
@@ -834,7 +834,7 @@ loop_versioning::dump_inner_likelihood (address_info &address,
void
loop_versioning::analyze_stride (address_info &address,
address_term_info &term,
- tree stride, struct loop *op_loop)
+ tree stride, class loop *op_loop)
{
term.stride = stride;
@@ -895,7 +895,7 @@ loop_versioning::find_per_loop_multiplication (address_info &address,
if (!mult || gimple_assign_rhs_code (mult) != MULT_EXPR)
return false;
- struct loop *mult_loop = loop_containing_stmt (mult);
+ class loop *mult_loop = loop_containing_stmt (mult);
if (!loop_outer (mult_loop))
return false;
@@ -937,7 +937,7 @@ loop_versioning::analyze_term_using_scevs (address_info &address,
if (!setter)
return false;
- struct loop *wrt_loop = loop_containing_stmt (setter);
+ class loop *wrt_loop = loop_containing_stmt (setter);
if (!loop_outer (wrt_loop))
return false;
@@ -1199,7 +1199,7 @@ loop_versioning::record_address_fragment (gimple *stmt,
/* Quick exit if no part of the address is calculated in STMT's loop,
since such addresses have no versioning opportunities. */
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
if (expr_invariant_in_loop_p (loop, expr))
return;
@@ -1375,7 +1375,7 @@ loop_versioning::analyze_expr (gimple *stmt, tree expr)
bool
loop_versioning::analyze_block (basic_block bb)
{
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
loop_info &li = get_loop_info (loop);
for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
gsi_next (&gsi))
@@ -1424,7 +1424,7 @@ loop_versioning::analyze_blocks ()
versioning at that level could be useful in some cases. */
get_loop_info (get_loop (m_fn, 0)).rejected_p = true;
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop_info &linfo = get_loop_info (loop);
@@ -1435,7 +1435,7 @@ loop_versioning::analyze_blocks ()
/* See whether an inner loop prevents versioning of this loop. */
if (!linfo.rejected_p)
- for (struct loop *inner = loop->inner; inner; inner = inner->next)
+ for (class loop *inner = loop->inner; inner; inner = inner->next)
if (get_loop_info (inner).rejected_p)
{
linfo.rejected_p = true;
@@ -1479,7 +1479,7 @@ loop_versioning::analyze_blocks ()
LOOP. */
void
-loop_versioning::prune_loop_conditions (struct loop *loop, vr_values *vrs)
+loop_versioning::prune_loop_conditions (class loop *loop, vr_values *vrs)
{
loop_info &li = get_loop_info (loop);
@@ -1525,7 +1525,7 @@ loop_versioning::prune_conditions ()
OUTER. */
void
-loop_versioning::merge_loop_info (struct loop *outer, struct loop *inner)
+loop_versioning::merge_loop_info (class loop *outer, class loop *inner)
{
loop_info &inner_li = get_loop_info (inner);
loop_info &outer_li = get_loop_info (outer);
@@ -1549,7 +1549,7 @@ loop_versioning::merge_loop_info (struct loop *outer, struct loop *inner)
/* Add LOOP to the queue of loops to version. */
void
-loop_versioning::add_loop_to_queue (struct loop *loop)
+loop_versioning::add_loop_to_queue (class loop *loop)
{
loop_info &li = get_loop_info (loop);
@@ -1571,7 +1571,7 @@ loop_versioning::add_loop_to_queue (struct loop *loop)
We have already made this decision for all inner loops of LOOP. */
bool
-loop_versioning::decide_whether_loop_is_versionable (struct loop *loop)
+loop_versioning::decide_whether_loop_is_versionable (class loop *loop)
{
loop_info &li = get_loop_info (loop);
@@ -1579,7 +1579,7 @@ loop_versioning::decide_whether_loop_is_versionable (struct loop *loop)
return false;
/* Examine the decisions made for inner loops. */
- for (struct loop *inner = loop->inner; inner; inner = inner->next)
+ for (class loop *inner = loop->inner; inner; inner = inner->next)
{
loop_info &inner_li = get_loop_info (inner);
if (inner_li.rejected_p)
@@ -1631,7 +1631,7 @@ loop_versioning::decide_whether_loop_is_versionable (struct loop *loop)
}
/* Hoist all version checks from subloops to this loop. */
- for (struct loop *subloop = loop->inner; subloop; subloop = subloop->next)
+ for (class loop *subloop = loop->inner; subloop; subloop = subloop->next)
merge_loop_info (loop, subloop);
return true;
@@ -1646,7 +1646,7 @@ loop_versioning::make_versioning_decisions ()
AUTO_DUMP_SCOPE ("make_versioning_decisions",
dump_user_location_t::from_function_decl (m_fn->decl));
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
loop_info &linfo = get_loop_info (loop);
@@ -1663,7 +1663,7 @@ loop_versioning::make_versioning_decisions ()
/* We can't version this loop, so individually version any
subloops that would benefit and haven't been versioned yet. */
linfo.rejected_p = true;
- for (struct loop *subloop = loop->inner; subloop;
+ for (class loop *subloop = loop->inner; subloop;
subloop = subloop->next)
if (get_loop_info (subloop).worth_versioning_p ())
add_loop_to_queue (subloop);
@@ -1677,7 +1677,7 @@ loop_versioning::make_versioning_decisions ()
cached in the associated loop_info. Return true on success. */
bool
-loop_versioning::version_loop (struct loop *loop)
+loop_versioning::version_loop (class loop *loop)
{
loop_info &li = get_loop_info (loop);
@@ -1739,7 +1739,7 @@ loop_versioning::implement_versioning_decisions ()
user-facing at this point. */
bool any_succeeded_p = false;
- struct loop *loop;
+ class loop *loop;
unsigned int i;
FOR_EACH_VEC_ELT (m_loops_to_version, i, loop)
if (version_loop (loop))
diff --git a/gcc/gimple-ssa-evrp-analyze.c b/gcc/gimple-ssa-evrp-analyze.c
index 4c68af847e1..46f5a019776 100644
--- a/gcc/gimple-ssa-evrp-analyze.c
+++ b/gcc/gimple-ssa-evrp-analyze.c
@@ -262,7 +262,7 @@ evrp_range_analyzer::record_ranges_from_phis (basic_block bb)
use PHI arg ranges which may be still UNDEFINED but have
to use VARYING for them. But we can still resort to
SCEV for loop header PHIs. */
- struct loop *l;
+ class loop *l;
if (scev_initialized_p ()
&& interesting
&& (l = loop_containing_stmt (phi))
diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c
index 1f50e3af03e..0bf64b314d6 100644
--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -2159,7 +2159,7 @@ public:
virtual unsigned int execute (function *);
private:
- hash_map<tree_operand_hash, struct imm_store_chain_info *> m_stores;
+ hash_map<tree_operand_hash, class imm_store_chain_info *> m_stores;
/* Form a doubly-linked stack of the elements of m_stores, so that
we can iterate over them in a predictable way. Using this order
@@ -3097,7 +3097,7 @@ split_store::split_store (unsigned HOST_WIDE_INT bp,
if there is exactly one original store in the range. */
static store_immediate_info *
-find_constituent_stores (struct merged_store_group *group,
+find_constituent_stores (class merged_store_group *group,
vec<store_immediate_info *> *stores,
unsigned int *first,
unsigned HOST_WIDE_INT bitpos,
@@ -3240,7 +3240,7 @@ count_multiple_uses (store_immediate_info *info)
static unsigned int
split_group (merged_store_group *group, bool allow_unaligned_store,
bool allow_unaligned_load, bool bzero_first,
- vec<struct split_store *> *split_stores,
+ vec<split_store *> *split_stores,
unsigned *total_orig,
unsigned *total_new)
{
@@ -3277,7 +3277,7 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
if (align_bitpos)
align = least_bit_hwi (align_bitpos);
bytepos = group->start / BITS_PER_UNIT;
- struct split_store *store
+ split_store *store
= new split_store (bytepos, group->width, align);
unsigned int first = 0;
find_constituent_stores (group, &store->orig_stores,
@@ -3335,7 +3335,7 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
ret = 1;
if (split_stores)
{
- struct split_store *store
+ split_store *store
= new split_store (bytepos, group->stores[0]->bitsize, align_base);
store->orig_stores.safe_push (group->stores[0]);
store->orig = true;
@@ -3462,7 +3462,7 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
if (split_stores)
{
- struct split_store *store
+ split_store *store
= new split_store (try_pos, try_size, align);
info = find_constituent_stores (group, &store->orig_stores,
&first, try_bitpos, try_size);
@@ -3483,7 +3483,7 @@ split_group (merged_store_group *group, bool allow_unaligned_store,
if (total_orig)
{
unsigned int i;
- struct split_store *store;
+ split_store *store;
/* If we are reusing some original stores and any of the
original SSA_NAMEs had multiple uses, we need to subtract
those now before we add the new ones. */
@@ -3650,7 +3650,7 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
if (orig_num_stmts < 2)
return false;
- auto_vec<struct split_store *, 32> split_stores;
+ auto_vec<class split_store *, 32> split_stores;
bool allow_unaligned_store
= !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
bool allow_unaligned_load = allow_unaligned_store;
@@ -4610,7 +4610,7 @@ pass_store_merging::process_store (gimple *stmt)
if (!ins_stmt)
memset (&n, 0, sizeof (n));
- struct imm_store_chain_info **chain_info = NULL;
+ class imm_store_chain_info **chain_info = NULL;
if (base_addr)
chain_info = m_stores.get (base_addr);
@@ -4646,7 +4646,7 @@ pass_store_merging::process_store (gimple *stmt)
/* Store aliases any existing chain? */
terminate_all_aliasing_chains (NULL, stmt);
/* Start a new chain. */
- struct imm_store_chain_info *new_chain
+ class imm_store_chain_info *new_chain
= new imm_store_chain_info (m_stores_head, base_addr);
info = new store_immediate_info (const_bitsize, const_bitpos,
const_bitregion_start,
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index aa5a8567a1b..d343da005e4 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -297,8 +297,8 @@ public:
tree cached_basis;
};
-typedef struct slsr_cand_d slsr_cand, *slsr_cand_t;
-typedef const struct slsr_cand_d *const_slsr_cand_t;
+typedef class slsr_cand_d slsr_cand, *slsr_cand_t;
+typedef const class slsr_cand_d *const_slsr_cand_t;
/* Pointers to candidates are chained together as part of a mapping
from base expressions to the candidates that use them. */
@@ -354,7 +354,7 @@ public:
basic_block init_bb;
};
-typedef struct incr_info_d incr_info, *incr_info_t;
+typedef class incr_info_d incr_info, *incr_info_t;
/* Candidates are maintained in a vector. If candidate X dominates
candidate Y, then X appears before Y in the vector; but the
@@ -807,7 +807,7 @@ slsr_process_phi (gphi *phi, bool speed)
unsigned i;
tree arg0_base = NULL_TREE, base_type;
slsr_cand_t c;
- struct loop *cand_loop = gimple_bb (phi)->loop_father;
+ class loop *cand_loop = gimple_bb (phi)->loop_father;
unsigned savings = 0;
/* A CAND_PHI requires each of its arguments to have the same
diff --git a/gcc/gimple-ssa-warn-alloca.c b/gcc/gimple-ssa-warn-alloca.c
index 73726c1a670..af39ff415e1 100644
--- a/gcc/gimple-ssa-warn-alloca.c
+++ b/gcc/gimple-ssa-warn-alloca.c
@@ -185,7 +185,7 @@ adjusted_warn_limit (bool idx)
// MAX_SIZE is WARN_ALLOCA= adjusted for VLAs. It is the maximum size
// in bytes we allow for arg.
-static struct alloca_type_and_limit
+static class alloca_type_and_limit
alloca_call_type_by_arg (tree arg, tree arg_casted, edge e,
unsigned HOST_WIDE_INT max_size)
{
@@ -326,7 +326,7 @@ is_max (tree x, wide_int max)
// type to an unsigned type, set *INVALID_CASTED_TYPE to the
// problematic signed type.
-static struct alloca_type_and_limit
+static class alloca_type_and_limit
alloca_call_type (gimple *stmt, bool is_vla, tree *invalid_casted_type)
{
gcc_assert (gimple_alloca_call_p (stmt));
@@ -459,7 +459,7 @@ alloca_call_type (gimple *stmt, bool is_vla, tree *invalid_casted_type)
// If we couldn't find anything, try a few heuristics for things we
// can easily determine. Check these misc cases but only accept
// them if all predecessors have a known bound.
- struct alloca_type_and_limit ret = alloca_type_and_limit (ALLOCA_OK);
+ class alloca_type_and_limit ret = alloca_type_and_limit (ALLOCA_OK);
FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->preds)
{
gcc_assert (!len_casted || TYPE_UNSIGNED (TREE_TYPE (len_casted)));
@@ -536,7 +536,7 @@ pass_walloca::execute (function *fun)
continue;
tree invalid_casted_type = NULL;
- struct alloca_type_and_limit t
+ class alloca_type_and_limit t
= alloca_call_type (stmt, is_vla, &invalid_casted_type);
unsigned HOST_WIDE_INT adjusted_alloca_limit
diff --git a/gcc/gimple-streamer-in.c b/gcc/gimple-streamer-in.c
index 3142b371aee..72a847d40fa 100644
--- a/gcc/gimple-streamer-in.c
+++ b/gcc/gimple-streamer-in.c
@@ -36,7 +36,7 @@ along with GCC; see the file COPYING3. If not see
the file being read. IB is the input block to use for reading. */
static gphi *
-input_phi (struct lto_input_block *ib, basic_block bb, struct data_in *data_in,
+input_phi (class lto_input_block *ib, basic_block bb, class data_in *data_in,
struct function *fn)
{
unsigned HOST_WIDE_INT ix;
@@ -83,7 +83,7 @@ input_phi (struct lto_input_block *ib, basic_block bb, struct data_in *data_in,
descriptors in DATA_IN. */
static gimple *
-input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in,
+input_gimple_stmt (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag)
{
gimple *stmt;
@@ -249,8 +249,8 @@ input_gimple_stmt (struct lto_input_block *ib, struct data_in *data_in,
FN is the function being processed. */
void
-input_bb (struct lto_input_block *ib, enum LTO_tags tag,
- struct data_in *data_in, struct function *fn,
+input_bb (class lto_input_block *ib, enum LTO_tags tag,
+ class data_in *data_in, struct function *fn,
int count_materialization_scale)
{
unsigned int index;
diff --git a/gcc/gimple-streamer.h b/gcc/gimple-streamer.h
index d0d144e2346..ee36192bbac 100644
--- a/gcc/gimple-streamer.h
+++ b/gcc/gimple-streamer.h
@@ -25,7 +25,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-streamer.h"
/* In gimple-streamer-in.c */
-void input_bb (struct lto_input_block *, enum LTO_tags, struct data_in *,
+void input_bb (class lto_input_block *, enum LTO_tags, class data_in *,
struct function *, int);
/* In gimple-streamer-out.c */
diff --git a/gcc/godump.c b/gcc/godump.c
index 39cce5e6b36..ddb19fd414d 100644
--- a/gcc/godump.c
+++ b/gcc/godump.c
@@ -680,7 +680,7 @@ go_force_record_alignment (struct obstack *ob, const char *type_string,
calls from go_format_type() itself. */
static bool
-go_format_type (struct godump_container *container, tree type,
+go_format_type (class godump_container *container, tree type,
bool use_type_name, bool is_func_ok, unsigned int *p_art_i,
bool is_anon_record_or_union)
{
@@ -1092,7 +1092,7 @@ go_format_type (struct godump_container *container, tree type,
it. */
static void
-go_output_type (struct godump_container *container)
+go_output_type (class godump_container *container)
{
struct obstack *ob;
@@ -1105,7 +1105,7 @@ go_output_type (struct godump_container *container)
/* Output a function declaration. */
static void
-go_output_fndecl (struct godump_container *container, tree decl)
+go_output_fndecl (class godump_container *container, tree decl)
{
if (!go_format_type (container, TREE_TYPE (decl), false, true, NULL, false))
fprintf (go_dump_file, "// ");
@@ -1119,7 +1119,7 @@ go_output_fndecl (struct godump_container *container, tree decl)
/* Output a typedef or something like a struct definition. */
static void
-go_output_typedef (struct godump_container *container, tree decl)
+go_output_typedef (class godump_container *container, tree decl)
{
/* If we have an enum type, output the enum constants
separately. */
@@ -1246,7 +1246,7 @@ go_output_typedef (struct godump_container *container, tree decl)
/* Output a variable. */
static void
-go_output_var (struct godump_container *container, tree decl)
+go_output_var (class godump_container *container, tree decl)
{
bool is_valid;
tree type_name;
@@ -1335,7 +1335,7 @@ static const char * const keywords[] = {
};
static void
-keyword_hash_init (struct godump_container *container)
+keyword_hash_init (class godump_container *container)
{
size_t i;
size_t count = sizeof (keywords) / sizeof (keywords[0]);
@@ -1355,7 +1355,7 @@ keyword_hash_init (struct godump_container *container)
bool
find_dummy_types (const char *const &ptr, godump_container *adata)
{
- struct godump_container *data = (struct godump_container *) adata;
+ class godump_container *data = (class godump_container *) adata;
const char *type = (const char *) ptr;
void **slot;
void **islot;
@@ -1372,7 +1372,7 @@ find_dummy_types (const char *const &ptr, godump_container *adata)
static void
go_finish (const char *filename)
{
- struct godump_container container;
+ class godump_container container;
unsigned int ix;
tree decl;
diff --git a/gcc/graph.c b/gcc/graph.c
index 33e4c0356ee..5452822f21e 100644
--- a/gcc/graph.c
+++ b/gcc/graph.c
@@ -197,7 +197,7 @@ draw_cfg_nodes_no_loops (pretty_printer *pp, struct function *fun)
static void
draw_cfg_nodes_for_loop (pretty_printer *pp, int funcdef_no,
- struct loop *loop)
+ class loop *loop)
{
basic_block *body;
unsigned int i;
@@ -217,7 +217,7 @@ draw_cfg_nodes_for_loop (pretty_printer *pp, int funcdef_no,
fillcolors[(loop_depth (loop) - 1) % 3],
loop->num);
- for (struct loop *inner = loop->inner; inner; inner = inner->next)
+ for (class loop *inner = loop->inner; inner; inner = inner->next)
draw_cfg_nodes_for_loop (pp, funcdef_no, inner);
if (loop->header == NULL)
diff --git a/gcc/hard-reg-set.h b/gcc/hard-reg-set.h
index a72819662fb..bd4249b5a17 100644
--- a/gcc/hard-reg-set.h
+++ b/gcc/hard-reg-set.h
@@ -613,8 +613,8 @@ hard_reg_set_iter_next (hard_reg_set_iterator *iter, unsigned *regno)
extern char global_regs[FIRST_PSEUDO_REGISTER];
-struct simplifiable_subreg;
-struct subreg_shape;
+class simplifiable_subreg;
+class subreg_shape;
struct simplifiable_subregs_hasher : nofree_ptr_hash <simplifiable_subreg>
{
diff --git a/gcc/hsa-brig.c b/gcc/hsa-brig.c
index 424eac7ef47..45f4149969a 100644
--- a/gcc/hsa-brig.c
+++ b/gcc/hsa-brig.c
@@ -150,9 +150,8 @@ struct hsa_brig_data_chunk
/* Structure representing a BRIG section, holding and writing its data. */
-class hsa_brig_section
+struct hsa_brig_section
{
-public:
/* Section name that will be output to the BRIG. */
const char *section_name;
/* Size in bytes of all data stored in the section. */
@@ -579,7 +578,7 @@ static void emit_immediate_operand (hsa_op_immed *imm);
Return the offset of the directive. */
static unsigned
-emit_directive_variable (struct hsa_symbol *symbol)
+emit_directive_variable (class hsa_symbol *symbol)
{
struct BrigDirectiveVariable dirvar;
unsigned name_offset;
diff --git a/gcc/hsa-common.h b/gcc/hsa-common.h
index 121c4eab8e7..912253974b6 100644
--- a/gcc/hsa-common.h
+++ b/gcc/hsa-common.h
@@ -1068,7 +1068,7 @@ private:
static inline hsa_bb *
hsa_bb_for_bb (basic_block bb)
{
- return (struct hsa_bb *) bb->aux;
+ return (class hsa_bb *) bb->aux;
}
/* Class for hashing local hsa_symbols. */
@@ -1150,14 +1150,14 @@ public:
hash_map <tree, hsa_symbol *> m_string_constants_map;
/* Vector of pointers to spill symbols. */
- vec <struct hsa_symbol *> m_spill_symbols;
+ vec <class hsa_symbol *> m_spill_symbols;
/* Vector of pointers to global variables and transformed string constants
that are used by the function. */
- vec <struct hsa_symbol *> m_global_symbols;
+ vec <class hsa_symbol *> m_global_symbols;
/* Private function artificial variables. */
- vec <struct hsa_symbol *> m_private_variables;
+ vec <class hsa_symbol *> m_private_variables;
/* Vector of called function declarations. */
vec <tree> m_called_functions;
@@ -1318,7 +1318,7 @@ hsa_internal_fn_hasher::equal (const value_type a, const compare_type b)
}
/* in hsa-common.c */
-extern struct hsa_function_representation *hsa_cfun;
+extern class hsa_function_representation *hsa_cfun;
extern hash_map <tree, vec <const char *> *> *hsa_decl_kernel_dependencies;
extern hsa_summary_t *hsa_summaries;
extern hsa_symbol *hsa_num_threads;
diff --git a/gcc/hsa-dump.c b/gcc/hsa-dump.c
index 332d89d1289..2d856012c4a 100644
--- a/gcc/hsa-dump.c
+++ b/gcc/hsa-dump.c
@@ -1229,7 +1229,7 @@ dump_hsa_cfun (FILE *f)
FOR_ALL_BB_FN (bb, cfun)
{
- hsa_bb *hbb = (struct hsa_bb *) bb->aux;
+ hsa_bb *hbb = (class hsa_bb *) bb->aux;
dump_hsa_bb (f, hbb);
}
}
diff --git a/gcc/hsa-gen.c b/gcc/hsa-gen.c
index 54d98a5f507..26e1e2496bf 100644
--- a/gcc/hsa-gen.c
+++ b/gcc/hsa-gen.c
@@ -6070,7 +6070,7 @@ gen_function_def_parameters ()
for (parm = DECL_ARGUMENTS (cfun->decl); parm;
parm = DECL_CHAIN (parm))
{
- struct hsa_symbol **slot;
+ class hsa_symbol **slot;
hsa_symbol *arg
= new hsa_symbol (BRIG_TYPE_NONE, hsa_cfun->m_kern_p
@@ -6128,7 +6128,7 @@ gen_function_def_parameters ()
if (!VOID_TYPE_P (TREE_TYPE (TREE_TYPE (cfun->decl))))
{
- struct hsa_symbol **slot;
+ class hsa_symbol **slot;
hsa_cfun->m_output_arg = new hsa_symbol (BRIG_TYPE_NONE, BRIG_SEGMENT_ARG,
BRIG_LINKAGE_FUNCTION);
diff --git a/gcc/hsa-regalloc.c b/gcc/hsa-regalloc.c
index d0c160939f7..597bb666c6e 100644
--- a/gcc/hsa-regalloc.c
+++ b/gcc/hsa-regalloc.c
@@ -256,7 +256,7 @@ dump_hsa_cfun_regalloc (FILE *f)
FOR_ALL_BB_FN (bb, cfun)
{
- hsa_bb *hbb = (struct hsa_bb *) bb->aux;
+ hsa_bb *hbb = (class hsa_bb *) bb->aux;
bitmap_print (dump_file, hbb->m_livein, "m_livein ", "\n");
dump_hsa_bb (f, hbb);
bitmap_print (dump_file, hbb->m_liveout, "m_liveout ", "\n");
diff --git a/gcc/input.c b/gcc/input.c
index baab42af9ae..00301ef68dd 100644
--- a/gcc/input.c
+++ b/gcc/input.c
@@ -124,14 +124,14 @@ public:
location_t input_location = UNKNOWN_LOCATION;
-struct line_maps *line_table;
+class line_maps *line_table;
/* A stashed copy of "line_table" for use by selftest::line_table_test.
This needs to be a global so that it can be a GC root, and thus
prevent the stashed copy from being garbage-collected if the GC runs
during a line_table_test. */
-struct line_maps *saved_line_table;
+class line_maps *saved_line_table;
static fcache *fcache_tab;
static const size_t fcache_tab_size = 16;
@@ -980,7 +980,7 @@ dump_line_table_statistics (void)
/* Get location one beyond the final location in ordinary map IDX. */
static location_t
-get_end_location (struct line_maps *set, unsigned int idx)
+get_end_location (class line_maps *set, unsigned int idx)
{
if (idx == LINEMAPS_ORDINARY_USED (set) - 1)
return set->highest_location;
@@ -2051,7 +2051,7 @@ test_lexer (const line_table_case &case_)
/* Forward decls. */
-struct lexer_test;
+class lexer_test;
class lexer_test_options;
/* A class for specifying options of a lexer_test.
diff --git a/gcc/input.h b/gcc/input.h
index 3e038207bd0..c459bf28553 100644
--- a/gcc/input.h
+++ b/gcc/input.h
@@ -23,8 +23,8 @@ along with GCC; see the file COPYING3. If not see
#include "line-map.h"
-extern GTY(()) struct line_maps *line_table;
-extern GTY(()) struct line_maps *saved_line_table;
+extern GTY(()) class line_maps *line_table;
+extern GTY(()) class line_maps *saved_line_table;
/* A value which will never be used to represent a real location. */
#define UNKNOWN_LOCATION ((location_t) 0)
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index 90f8e567d57..10673769958 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -149,7 +149,7 @@ get_multi_vector_move (tree array_type, convert_optab optab)
static void
expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
tree type, lhs, rhs;
rtx target, mem;
@@ -173,7 +173,7 @@ expand_load_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
static void
expand_store_lanes_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
tree type, lhs, rhs;
rtx target, reg;
@@ -227,7 +227,7 @@ expand_GOMP_SIMT_ENTER_ALLOC (internal_fn, gcall *stmt)
target = gen_reg_rtx (Pmode);
rtx size = expand_normal (gimple_call_arg (stmt, 0));
rtx align = expand_normal (gimple_call_arg (stmt, 1));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, Pmode);
create_input_operand (&ops[1], size, Pmode);
create_input_operand (&ops[2], align, Pmode);
@@ -242,7 +242,7 @@ expand_GOMP_SIMT_EXIT (internal_fn, gcall *stmt)
{
gcc_checking_assert (!gimple_call_lhs (stmt));
rtx arg = expand_normal (gimple_call_arg (stmt, 0));
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_input_operand (&ops[0], arg, Pmode);
gcc_assert (targetm.have_omp_simt_exit ());
expand_insn (targetm.code_for_omp_simt_exit, 1, ops);
@@ -285,7 +285,7 @@ expand_GOMP_SIMT_LAST_LANE (internal_fn, gcall *stmt)
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx cond = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], cond, mode);
gcc_assert (targetm.have_omp_simt_last_lane ());
@@ -304,7 +304,7 @@ expand_GOMP_SIMT_ORDERED_PRED (internal_fn, gcall *stmt)
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx ctr = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], ctr, mode);
gcc_assert (targetm.have_omp_simt_ordered ());
@@ -324,7 +324,7 @@ expand_GOMP_SIMT_VOTE_ANY (internal_fn, gcall *stmt)
rtx target = expand_expr (lhs, NULL_RTX, VOIDmode, EXPAND_WRITE);
rtx cond = expand_normal (gimple_call_arg (stmt, 0));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], cond, mode);
gcc_assert (targetm.have_omp_simt_vote_any ());
@@ -345,7 +345,7 @@ expand_GOMP_SIMT_XCHG_BFLY (internal_fn, gcall *stmt)
rtx src = expand_normal (gimple_call_arg (stmt, 0));
rtx idx = expand_normal (gimple_call_arg (stmt, 1));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], src, mode);
create_input_operand (&ops[2], idx, SImode);
@@ -366,7 +366,7 @@ expand_GOMP_SIMT_XCHG_IDX (internal_fn, gcall *stmt)
rtx src = expand_normal (gimple_call_arg (stmt, 0));
rtx idx = expand_normal (gimple_call_arg (stmt, 1));
machine_mode mode = TYPE_MODE (TREE_TYPE (lhs));
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_input_operand (&ops[1], src, mode);
create_input_operand (&ops[2], idx, SImode);
@@ -774,7 +774,7 @@ expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
: usubv4_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
@@ -995,7 +995,7 @@ expand_addsub_overflow (location_t loc, tree_code code, tree lhs,
: subv4_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
@@ -1146,7 +1146,7 @@ expand_neg_overflow (location_t loc, tree lhs, tree arg1, bool is_ubsan,
enum insn_code icode = optab_handler (negv3_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
@@ -1539,7 +1539,7 @@ expand_mul_overflow (location_t loc, tree lhs, tree arg0, tree arg1,
}
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
rtx_insn *last = get_last_insn ();
res = gen_reg_rtx (mode);
@@ -2475,7 +2475,7 @@ expand_call_mem_ref (tree type, gcall *stmt, int index)
static void
expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
tree type, lhs, rhs, maskt;
rtx mem, target, mask;
insn_code icode;
@@ -2510,7 +2510,7 @@ expand_mask_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
static void
expand_mask_store_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
tree type, lhs, rhs, maskt;
rtx mem, reg, mask;
insn_code icode;
@@ -2771,7 +2771,7 @@ expand_scatter_store_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
HOST_WIDE_INT scale_int = tree_to_shwi (scale);
rtx rhs_rtx = expand_normal (rhs);
- struct expand_operand ops[6];
+ class expand_operand ops[6];
int i = 0;
create_address_operand (&ops[i++], base_rtx);
create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
@@ -2805,7 +2805,7 @@ expand_gather_load_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
HOST_WIDE_INT scale_int = tree_to_shwi (scale);
int i = 0;
- struct expand_operand ops[6];
+ class expand_operand ops[6];
create_output_operand (&ops[i++], lhs_rtx, TYPE_MODE (TREE_TYPE (lhs)));
create_address_operand (&ops[i++], base_rtx);
create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index e62a9a03ef9..0fd36506c00 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -381,8 +381,8 @@ static hash_map<const char *, unsigned> *clone_num_suffixes;
/* Return the param lattices structure corresponding to the Ith formal
parameter of the function described by INFO. */
-static inline struct ipcp_param_lattices *
-ipa_get_parm_lattices (struct ipa_node_params *info, int i)
+static inline class ipcp_param_lattices *
+ipa_get_parm_lattices (class ipa_node_params *info, int i)
{
gcc_assert (i >= 0 && i < ipa_get_param_count (info));
gcc_checking_assert (!info->ipcp_orig_node);
@@ -393,18 +393,18 @@ ipa_get_parm_lattices (struct ipa_node_params *info, int i)
/* Return the lattice corresponding to the scalar value of the Ith formal
parameter of the function described by INFO. */
static inline ipcp_lattice<tree> *
-ipa_get_scalar_lat (struct ipa_node_params *info, int i)
+ipa_get_scalar_lat (class ipa_node_params *info, int i)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
return &plats->itself;
}
/* Return the lattice corresponding to the scalar value of the Ith formal
parameter of the function described by INFO. */
static inline ipcp_lattice<ipa_polymorphic_call_context> *
-ipa_get_poly_ctx_lat (struct ipa_node_params *info, int i)
+ipa_get_poly_ctx_lat (class ipa_node_params *info, int i)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
return &plats->ctxlat;
}
@@ -539,7 +539,7 @@ print_all_lattices (FILE * f, bool dump_sources, bool dump_benefits)
fprintf (f, "\nLattices:\n");
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
- struct ipa_node_params *info;
+ class ipa_node_params *info;
info = IPA_NODE_REF (node);
/* Skip constprop clones since we don't make lattices for them. */
@@ -550,7 +550,7 @@ print_all_lattices (FILE * f, bool dump_sources, bool dump_benefits)
for (i = 0; i < count; i++)
{
struct ipcp_agg_lattice *aglat;
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
fprintf (f, " param [%d]: ", i);
plats->itself.print (f, dump_sources, dump_benefits);
fprintf (f, " ctxs: ");
@@ -585,7 +585,7 @@ print_all_lattices (FILE * f, bool dump_sources, bool dump_benefits)
static void
determine_versionability (struct cgraph_node *node,
- struct ipa_node_params *info)
+ class ipa_node_params *info)
{
const char *reason = NULL;
@@ -823,7 +823,7 @@ ignore_edge_p (cgraph_edge *e)
/* Allocate the arrays in TOPO and topologically sort the nodes into order. */
static void
-build_toporder_info (struct ipa_topo_info *topo)
+build_toporder_info (class ipa_topo_info *topo)
{
topo->order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
topo->stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
@@ -837,7 +837,7 @@ build_toporder_info (struct ipa_topo_info *topo)
TOPO. */
static void
-free_toporder_info (struct ipa_topo_info *topo)
+free_toporder_info (class ipa_topo_info *topo)
{
ipa_free_postorder_info ();
free (topo->order);
@@ -847,9 +847,9 @@ free_toporder_info (struct ipa_topo_info *topo)
/* Add NODE to the stack in TOPO, unless it is already there. */
static inline void
-push_node_to_stack (struct ipa_topo_info *topo, struct cgraph_node *node)
+push_node_to_stack (class ipa_topo_info *topo, struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (info->node_enqueued)
return;
info->node_enqueued = 1;
@@ -860,7 +860,7 @@ push_node_to_stack (struct ipa_topo_info *topo, struct cgraph_node *node)
is empty. */
static struct cgraph_node *
-pop_node_from_stack (struct ipa_topo_info *topo)
+pop_node_from_stack (class ipa_topo_info *topo)
{
if (topo->stack_top)
{
@@ -902,7 +902,7 @@ ipcp_lattice<valtype>::set_contains_variable ()
not previously set as such. */
static inline bool
-set_agg_lats_to_bottom (struct ipcp_param_lattices *plats)
+set_agg_lats_to_bottom (class ipcp_param_lattices *plats)
{
bool ret = !plats->aggs_bottom;
plats->aggs_bottom = true;
@@ -913,7 +913,7 @@ set_agg_lats_to_bottom (struct ipcp_param_lattices *plats)
return true if they were not previously marked as such. */
static inline bool
-set_agg_lats_contain_variable (struct ipcp_param_lattices *plats)
+set_agg_lats_contain_variable (class ipcp_param_lattices *plats)
{
bool ret = !plats->aggs_contain_variable;
plats->aggs_contain_variable = true;
@@ -1123,7 +1123,7 @@ ipcp_bits_lattice::meet_with (ipcp_bits_lattice& other, unsigned precision,
return true if any of them has not been marked as such so far. */
static inline bool
-set_all_contains_variable (struct ipcp_param_lattices *plats)
+set_all_contains_variable (class ipcp_param_lattices *plats)
{
bool ret;
ret = plats->itself.set_contains_variable ();
@@ -1173,7 +1173,7 @@ set_single_call_flag (cgraph_node *node, void *)
static void
initialize_node_lattices (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
struct cgraph_edge *ie;
bool disable = false, variable = false;
int i;
@@ -1203,7 +1203,7 @@ initialize_node_lattices (struct cgraph_node *node)
for (i = 0; i < ipa_get_param_count (info); i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
plats->m_value_range.init ();
}
@@ -1211,7 +1211,7 @@ initialize_node_lattices (struct cgraph_node *node)
{
for (i = 0; i < ipa_get_param_count (info); i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
if (disable)
{
plats->itself.set_to_bottom ();
@@ -1304,7 +1304,7 @@ ipa_get_jf_ancestor_result (struct ipa_jump_func *jfunc, tree input)
passed. */
tree
-ipa_value_from_jfunc (struct ipa_node_params *info, struct ipa_jump_func *jfunc,
+ipa_value_from_jfunc (class ipa_node_params *info, struct ipa_jump_func *jfunc,
tree parm_type)
{
if (jfunc->type == IPA_JF_CONST)
@@ -1422,7 +1422,7 @@ ipcp_verify_propagated_values (void)
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
@@ -1674,7 +1674,7 @@ propagate_scalar_across_jump_function (struct cgraph_edge *cs,
else if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
ipcp_lattice<tree> *src_lat;
int src_idx;
bool ret;
@@ -1736,7 +1736,7 @@ propagate_context_across_jump_function (cgraph_edge *cs,
if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx;
ipcp_lattice<ipa_polymorphic_call_context> *src_lat;
@@ -1812,7 +1812,7 @@ propagate_bits_across_jump_function (cgraph_edge *cs, int idx,
enum availability availability;
cgraph_node *callee = cs->callee->function_symbol (&availability);
- struct ipa_node_params *callee_info = IPA_NODE_REF (callee);
+ class ipa_node_params *callee_info = IPA_NODE_REF (callee);
tree parm_type = ipa_get_type (callee_info, idx);
/* For K&R C programs, ipa_get_type() could return NULL_TREE. Avoid the
@@ -1835,7 +1835,7 @@ propagate_bits_across_jump_function (cgraph_edge *cs, int idx,
if (jfunc->type == IPA_JF_PASS_THROUGH
|| jfunc->type == IPA_JF_ANCESTOR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
tree operand = NULL_TREE;
enum tree_code code;
unsigned src_idx;
@@ -1855,7 +1855,7 @@ propagate_bits_across_jump_function (cgraph_edge *cs, int idx,
operand = build_int_cstu (size_type_node, offset);
}
- struct ipcp_param_lattices *src_lats
+ class ipcp_param_lattices *src_lats
= ipa_get_parm_lattices (caller_info, src_idx);
/* Try to propagate bits if src_lattice is bottom, but jfunc is known.
@@ -1909,7 +1909,7 @@ ipa_vr_operation_and_type_effects (value_range_base *dst_vr,
static bool
propagate_vr_across_jump_function (cgraph_edge *cs, ipa_jump_func *jfunc,
- struct ipcp_param_lattices *dest_plats,
+ class ipcp_param_lattices *dest_plats,
tree param_type)
{
ipcp_vr_lattice *dest_lat = &dest_plats->m_value_range;
@@ -1928,10 +1928,10 @@ propagate_vr_across_jump_function (cgraph_edge *cs, ipa_jump_func *jfunc,
if (TREE_CODE_CLASS (operation) == tcc_unary)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
tree operand_type = ipa_get_type (caller_info, src_idx);
- struct ipcp_param_lattices *src_lats
+ class ipcp_param_lattices *src_lats
= ipa_get_parm_lattices (caller_info, src_idx);
if (src_lats->m_value_range.bottom_p ())
@@ -1974,7 +1974,7 @@ propagate_vr_across_jump_function (cgraph_edge *cs, ipa_jump_func *jfunc,
aggs_by_ref to NEW_AGGS_BY_REF. */
static bool
-set_check_aggs_by_ref (struct ipcp_param_lattices *dest_plats,
+set_check_aggs_by_ref (class ipcp_param_lattices *dest_plats,
bool new_aggs_by_ref)
{
if (dest_plats->aggs)
@@ -2001,7 +2001,7 @@ set_check_aggs_by_ref (struct ipcp_param_lattices *dest_plats,
true. */
static bool
-merge_agg_lats_step (struct ipcp_param_lattices *dest_plats,
+merge_agg_lats_step (class ipcp_param_lattices *dest_plats,
HOST_WIDE_INT offset, HOST_WIDE_INT val_size,
struct ipcp_agg_lattice ***aglat,
bool pre_existing, bool *change)
@@ -2079,8 +2079,8 @@ set_chain_of_aglats_contains_variable (struct ipcp_agg_lattice *aglat)
static bool
merge_aggregate_lattices (struct cgraph_edge *cs,
- struct ipcp_param_lattices *dest_plats,
- struct ipcp_param_lattices *src_plats,
+ class ipcp_param_lattices *dest_plats,
+ class ipcp_param_lattices *src_plats,
int src_idx, HOST_WIDE_INT offset_delta)
{
bool pre_existing = dest_plats->aggs != NULL;
@@ -2134,7 +2134,7 @@ merge_aggregate_lattices (struct cgraph_edge *cs,
rules about propagating values passed by reference. */
static bool
-agg_pass_through_permissible_p (struct ipcp_param_lattices *src_plats,
+agg_pass_through_permissible_p (class ipcp_param_lattices *src_plats,
struct ipa_jump_func *jfunc)
{
return src_plats->aggs
@@ -2148,7 +2148,7 @@ agg_pass_through_permissible_p (struct ipcp_param_lattices *src_plats,
static bool
propagate_aggs_across_jump_function (struct cgraph_edge *cs,
struct ipa_jump_func *jfunc,
- struct ipcp_param_lattices *dest_plats)
+ class ipcp_param_lattices *dest_plats)
{
bool ret = false;
@@ -2158,9 +2158,9 @@ propagate_aggs_across_jump_function (struct cgraph_edge *cs,
if (jfunc->type == IPA_JF_PASS_THROUGH
&& ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
src_plats = ipa_get_parm_lattices (caller_info, src_idx);
if (agg_pass_through_permissible_p (src_plats, jfunc))
@@ -2177,9 +2177,9 @@ propagate_aggs_across_jump_function (struct cgraph_edge *cs,
else if (jfunc->type == IPA_JF_ANCESTOR
&& ipa_get_jf_ancestor_agg_preserved (jfunc))
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
src_plats = ipa_get_parm_lattices (caller_info, src_idx);
if (src_plats->aggs && src_plats->aggs_by_ref)
@@ -2250,10 +2250,10 @@ call_passes_through_thunk_p (cgraph_edge *cs)
static bool
propagate_constants_across_call (struct cgraph_edge *cs)
{
- struct ipa_node_params *callee_info;
+ class ipa_node_params *callee_info;
enum availability availability;
cgraph_node *callee;
- struct ipa_edge_args *args;
+ class ipa_edge_args *args;
bool ret = false;
int i, args_count, parms_count;
@@ -2284,7 +2284,7 @@ propagate_constants_across_call (struct cgraph_edge *cs)
for (; (i < args_count) && (i < parms_count); i++)
{
struct ipa_jump_func *jump_func = ipa_get_ith_jump_func (args, i);
- struct ipcp_param_lattices *dest_plats;
+ class ipcp_param_lattices *dest_plats;
tree param_type = ipa_get_type (callee_info, i);
dest_plats = ipa_get_parm_lattices (callee_info, i);
@@ -2563,7 +2563,7 @@ devirtualization_time_bonus (struct cgraph_node *node,
for (ie = node->indirect_calls; ie; ie = ie->next_callee)
{
struct cgraph_node *callee;
- struct ipa_fn_summary *isummary;
+ class ipa_fn_summary *isummary;
enum availability avail;
tree target;
bool speculative;
@@ -2645,7 +2645,7 @@ good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
gcc_assert (size_cost > 0);
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (max_count > profile_count::zero ())
{
int factor = RDIV (count_sum.probability_in
@@ -2692,7 +2692,7 @@ good_cloning_opportunity_p (struct cgraph_node *node, int time_benefit,
vector. Return NULL if there are none. */
static vec<ipa_agg_jf_item, va_gc> *
-context_independent_aggregate_values (struct ipcp_param_lattices *plats)
+context_independent_aggregate_values (class ipcp_param_lattices *plats)
{
vec<ipa_agg_jf_item, va_gc> *res = NULL;
@@ -2721,7 +2721,7 @@ context_independent_aggregate_values (struct ipcp_param_lattices *plats)
it. */
static bool
-gather_context_independent_values (struct ipa_node_params *info,
+gather_context_independent_values (class ipa_node_params *info,
vec<tree> *known_csts,
vec<ipa_polymorphic_call_context>
*known_contexts,
@@ -2746,7 +2746,7 @@ gather_context_independent_values (struct ipa_node_params *info,
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
if (lat->is_single_const ())
@@ -2863,7 +2863,7 @@ perform_estimation_of_a_value (cgraph_node *node, vec<tree> known_csts,
static void
estimate_local_effects (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts;
vec<ipa_polymorphic_call_context> known_contexts;
@@ -2943,7 +2943,7 @@ estimate_local_effects (struct cgraph_node *node)
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
ipcp_value<tree> *val;
@@ -2977,7 +2977,7 @@ estimate_local_effects (struct cgraph_node *node)
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
if (!plats->virt_call)
continue;
@@ -3012,7 +3012,7 @@ estimate_local_effects (struct cgraph_node *node)
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
struct ipa_agg_jump_function *ajf;
struct ipcp_agg_lattice *aglat;
@@ -3129,12 +3129,12 @@ value_topo_info<valtype>::add_val (ipcp_value<valtype> *cur_val)
static void
add_all_node_vals_to_toposort (cgraph_node *node, ipa_topo_info *topo)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
struct ipcp_agg_lattice *aglat;
@@ -3169,7 +3169,7 @@ add_all_node_vals_to_toposort (cgraph_node *node, ipa_topo_info *topo)
connected components. */
static void
-propagate_constants_topo (struct ipa_topo_info *topo)
+propagate_constants_topo (class ipa_topo_info *topo)
{
int i;
@@ -3272,7 +3272,7 @@ value_topo_info<valtype>::propagate_effects ()
summaries interprocedurally. */
static void
-ipcp_propagate_stage (struct ipa_topo_info *topo)
+ipcp_propagate_stage (class ipa_topo_info *topo)
{
struct cgraph_node *node;
@@ -3283,12 +3283,12 @@ ipcp_propagate_stage (struct ipa_topo_info *topo)
FOR_EACH_DEFINED_FUNCTION (node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
determine_versionability (node, info);
if (node->has_gimple_body_p ())
{
- info->lattices = XCNEWVEC (struct ipcp_param_lattices,
+ info->lattices = XCNEWVEC (class ipcp_param_lattices,
ipa_get_param_count (info));
initialize_node_lattices (node);
}
@@ -3352,7 +3352,7 @@ ipcp_discover_new_direct_edges (struct cgraph_node *node,
if (cs && !agg_contents && !polymorphic)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int c = ipa_get_controlled_uses (info, param_index);
if (c != IPA_UNDESCRIBED_USE)
{
@@ -3461,7 +3461,7 @@ same_node_or_its_all_contexts_clone_p (cgraph_node *node, cgraph_node *dest)
if (node == dest)
return true;
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
return info->is_all_contexts_clone && info->ipcp_orig_node == dest;
}
@@ -3472,7 +3472,7 @@ static bool
cgraph_edge_brings_value_p (cgraph_edge *cs, ipcp_value_source<tree> *src,
cgraph_node *dest, ipcp_value<tree> *dest_val)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
enum availability availability;
cgraph_node *real_dest = cs->callee->function_symbol (&availability);
@@ -3503,7 +3503,7 @@ cgraph_edge_brings_value_p (cgraph_edge *cs, ipcp_value_source<tree> *src,
return true;
struct ipcp_agg_lattice *aglat;
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
src->index);
if (src->offset == -1)
return (plats->itself.is_single_const ()
@@ -3532,7 +3532,7 @@ cgraph_edge_brings_value_p (cgraph_edge *cs,
cgraph_node *dest,
ipcp_value<ipa_polymorphic_call_context> *)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
cgraph_node *real_dest = cs->callee->function_symbol ();
if (!same_node_or_its_all_contexts_clone_p (real_dest, dest)
@@ -3546,7 +3546,7 @@ cgraph_edge_brings_value_p (cgraph_edge *cs,
&& values_equal_for_ipcp_p (src->val->value,
caller_info->known_contexts[src->index]);
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
src->index);
return plats->ctxlat.is_single_const ()
&& values_equal_for_ipcp_p (src->val->value,
@@ -3639,7 +3639,7 @@ gather_edges_for_value (ipcp_value<valtype> *val, cgraph_node *dest,
Return it or NULL if for some reason it cannot be created. */
static struct ipa_replace_map *
-get_replacement_map (struct ipa_node_params *info, tree value, int parm_num)
+get_replacement_map (class ipa_node_params *info, tree value, int parm_num)
{
struct ipa_replace_map *replace_map;
@@ -3808,7 +3808,7 @@ create_specialized_node (struct cgraph_node *node,
struct ipa_agg_replacement_value *aggvals,
vec<cgraph_edge *> callers)
{
- struct ipa_node_params *new_info, *info = IPA_NODE_REF (node);
+ class ipa_node_params *new_info, *info = IPA_NODE_REF (node);
vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
struct ipa_agg_replacement_value *av;
struct cgraph_node *new_node;
@@ -3942,7 +3942,7 @@ find_more_scalar_values_for_callers_subset (struct cgraph_node *node,
vec<tree> known_csts,
vec<cgraph_edge *> callers)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
for (i = 0; i < count; i++)
@@ -4075,7 +4075,7 @@ find_more_contexts_for_caller_subset (cgraph_node *node,
offsets (minus OFFSET) of lattices that contain only a single value. */
static vec<ipa_agg_jf_item>
-copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
+copy_plats_to_inter (class ipcp_param_lattices *plats, HOST_WIDE_INT offset)
{
vec<ipa_agg_jf_item> res = vNULL;
@@ -4097,7 +4097,7 @@ copy_plats_to_inter (struct ipcp_param_lattices *plats, HOST_WIDE_INT offset)
subtracting OFFSET). */
static void
-intersect_with_plats (struct ipcp_param_lattices *plats,
+intersect_with_plats (class ipcp_param_lattices *plats,
vec<ipa_agg_jf_item> *inter,
HOST_WIDE_INT offset)
{
@@ -4217,13 +4217,13 @@ intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
if (jfunc->type == IPA_JF_PASS_THROUGH
&& ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
if (caller_info->ipcp_orig_node)
{
struct cgraph_node *orig_node = caller_info->ipcp_orig_node;
- struct ipcp_param_lattices *orig_plats;
+ class ipcp_param_lattices *orig_plats;
orig_plats = ipa_get_parm_lattices (IPA_NODE_REF (orig_node),
src_idx);
if (agg_pass_through_permissible_p (orig_plats, jfunc))
@@ -4242,7 +4242,7 @@ intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
}
else
{
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
src_plats = ipa_get_parm_lattices (caller_info, src_idx);
if (agg_pass_through_permissible_p (src_plats, jfunc))
{
@@ -4264,9 +4264,9 @@ intersect_aggregates_with_edge (struct cgraph_edge *cs, int index,
else if (jfunc->type == IPA_JF_ANCESTOR
&& ipa_get_jf_ancestor_agg_preserved (jfunc))
{
- struct ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
+ class ipa_node_params *caller_info = IPA_NODE_REF (cs->caller);
int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
- struct ipcp_param_lattices *src_plats;
+ class ipcp_param_lattices *src_plats;
HOST_WIDE_INT delta = ipa_get_jf_ancestor_offset (jfunc);
if (caller_info->ipcp_orig_node)
@@ -4341,7 +4341,7 @@ static struct ipa_agg_replacement_value *
find_aggregate_values_for_callers_subset (struct cgraph_node *node,
vec<cgraph_edge *> callers)
{
- struct ipa_node_params *dest_info = IPA_NODE_REF (node);
+ class ipa_node_params *dest_info = IPA_NODE_REF (node);
struct ipa_agg_replacement_value *res;
struct ipa_agg_replacement_value **tail = &res;
struct cgraph_edge *cs;
@@ -4359,7 +4359,7 @@ find_aggregate_values_for_callers_subset (struct cgraph_node *node,
struct cgraph_edge *cs;
vec<ipa_agg_jf_item> inter = vNULL;
struct ipa_agg_jf_item *item;
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, i);
int j;
/* Among other things, the following check should deal with all by_ref
@@ -4412,10 +4412,10 @@ static bool
cgraph_edge_brings_all_scalars_for_node (struct cgraph_edge *cs,
struct cgraph_node *node)
{
- struct ipa_node_params *dest_info = IPA_NODE_REF (node);
+ class ipa_node_params *dest_info = IPA_NODE_REF (node);
int count = ipa_get_param_count (dest_info);
- struct ipa_node_params *caller_info;
- struct ipa_edge_args *args;
+ class ipa_node_params *caller_info;
+ class ipa_edge_args *args;
int i;
caller_info = IPA_NODE_REF (cs->caller);
@@ -4446,7 +4446,7 @@ static bool
cgraph_edge_brings_all_agg_vals_for_node (struct cgraph_edge *cs,
struct cgraph_node *node)
{
- struct ipa_node_params *orig_node_info;
+ class ipa_node_params *orig_node_info;
struct ipa_agg_replacement_value *aggval;
int i, ec, count;
@@ -4466,7 +4466,7 @@ cgraph_edge_brings_all_agg_vals_for_node (struct cgraph_edge *cs,
for (i = 0; i < count; i++)
{
static vec<ipa_agg_jf_item> values = vec<ipa_agg_jf_item>();
- struct ipcp_param_lattices *plats;
+ class ipcp_param_lattices *plats;
bool interesting = false;
for (struct ipa_agg_replacement_value *av = aggval; av; av = av->next)
if (aggval->index == i)
@@ -4722,7 +4722,7 @@ decide_about_value (struct cgraph_node *node, int index, HOST_WIDE_INT offset,
static bool
decide_whether_version_node (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int i, count = ipa_get_param_count (info);
vec<tree> known_csts;
vec<ipa_polymorphic_call_context> known_contexts;
@@ -4742,7 +4742,7 @@ decide_whether_version_node (struct cgraph_node *node)
for (i = 0; i < count;i++)
{
- struct ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
+ class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
ipcp_lattice<tree> *lat = &plats->itself;
ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
@@ -4832,7 +4832,7 @@ spread_undeadness (struct cgraph_node *node)
if (ipa_edge_within_scc (cs))
{
struct cgraph_node *callee;
- struct ipa_node_params *info;
+ class ipa_node_params *info;
callee = cs->callee->function_symbol (NULL);
info = IPA_NODE_REF (callee);
@@ -4895,7 +4895,7 @@ identify_dead_nodes (struct cgraph_node *node)
TOPO and make specialized clones if deemed beneficial. */
static void
-ipcp_decision_stage (struct ipa_topo_info *topo)
+ipcp_decision_stage (class ipa_topo_info *topo)
{
int i;
@@ -5069,7 +5069,7 @@ ipcp_store_vr_results (void)
static unsigned int
ipcp_driver (void)
{
- struct ipa_topo_info topo;
+ class ipa_topo_info topo;
if (edge_clone_summaries == NULL)
edge_clone_summaries = new edge_clone_summary_t (symtab);
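
An illustrative aside on the rule the ipa-cp.c hunks above lean on: in C++ an elaborated-type-specifier may use either class-key to name the same type, so these substitutions change no semantics; they only make the key at each use match the one on the definition, which is what tag-mismatch warnings (for example Clang's -Wmismatched-tags) key off. A minimal self-contained sketch, where lattice_info and both functions are made-up stand-ins rather than GCC code:

class lattice_info
{
public:
  int count;
};

int
read_count (class lattice_info *info)      /* key agrees with the definition */
{
  return info->count;
}

int
read_count_again (struct lattice_info *info)  /* still valid C++, but the key
                                                 differs from the definition,
                                                 so tag-mismatch warnings
                                                 point here */
{
  return info->count;
}
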
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index a5e3a63b66d..95e2d950fe9 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -2974,7 +2974,7 @@ final_warning_record::grow_type_warnings (unsigned newlen)
}
}
-struct final_warning_record *final_warning_records;
+class final_warning_record *final_warning_records;
/* Return vector containing possible targets of polymorphic call of type
OTR_TYPE calling method OTR_TOKEN within type of OTR_OUTER_TYPE and OFFSET.
diff --git a/gcc/ipa-fnsummary.c b/gcc/ipa-fnsummary.c
index 160261d34c9..09986211a1d 100644
--- a/gcc/ipa-fnsummary.c
+++ b/gcc/ipa-fnsummary.c
@@ -212,7 +212,7 @@ ipa_fn_summary::account_size_time (int size, sreal time,
}
if (!found)
{
- struct size_time_entry new_entry;
+ class size_time_entry new_entry;
new_entry.size = size;
new_entry.time = time;
new_entry.exec_predicate = exec_pred;
@@ -241,7 +241,7 @@ redirect_to_unreachable (struct cgraph_edge *e)
e->make_direct (target);
else
e->redirect_callee (target);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
e->inline_failed = CIF_UNREACHABLE;
e->count = profile_count::zero ();
es->call_stmt_size = 0;
@@ -266,7 +266,7 @@ edge_set_predicate (struct cgraph_edge *e, predicate *predicate)
&& (!e->speculative || e->callee))
e = redirect_to_unreachable (e);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
if (predicate && *predicate != true)
{
if (!es->predicate)
@@ -328,7 +328,7 @@ evaluate_conditions_for_known_args (struct cgraph_node *node,
{
clause_t clause = inline_p ? 0 : 1 << predicate::not_inlined_condition;
clause_t nonspec_clause = 1 << predicate::not_inlined_condition;
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (node);
int i;
struct condition *c;
@@ -428,7 +428,7 @@ evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
vec<ipa_agg_jump_function_p> *known_aggs_ptr)
{
struct cgraph_node *callee = e->callee->ultimate_alias_target ();
- struct ipa_fn_summary *info = ipa_fn_summaries->get (callee);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (callee);
vec<tree> known_vals = vNULL;
vec<ipa_agg_jump_function_p> known_aggs = vNULL;
@@ -443,9 +443,9 @@ evaluate_properties_for_edge (struct cgraph_edge *e, bool inline_p,
&& !e->call_stmt_cannot_inline_p
&& ((clause_ptr && info->conds) || known_vals_ptr || known_contexts_ptr))
{
- struct ipa_node_params *caller_parms_info, *callee_pi;
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_node_params *caller_parms_info, *callee_pi;
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
int i, count = ipa_get_cs_argument_count (args);
if (e->caller->global.inlined_to)
@@ -604,7 +604,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
{
vec<size_time_entry, va_gc> *entry = info->size_time_table;
/* Use SRC parm info since it may not be copied yet. */
- struct ipa_node_params *parms_info = IPA_NODE_REF (src);
+ class ipa_node_params *parms_info = IPA_NODE_REF (src);
vec<tree> known_vals = vNULL;
int count = ipa_get_param_count (parms_info);
int i, j;
@@ -668,7 +668,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
for (edge = dst->callees; edge; edge = next)
{
predicate new_predicate;
- struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (edge);
next = edge->next_callee;
if (!edge->inline_failed)
@@ -687,7 +687,7 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
for (edge = dst->indirect_calls; edge; edge = next)
{
predicate new_predicate;
- struct ipa_call_summary *es = ipa_call_summaries->get_create (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (edge);
next = edge->next_callee;
gcc_checking_assert (edge->inline_failed);
@@ -744,8 +744,8 @@ ipa_fn_summary_t::duplicate (cgraph_node *src,
void
ipa_call_summary_t::duplicate (struct cgraph_edge *src,
struct cgraph_edge *dst,
- struct ipa_call_summary *srcinfo,
- struct ipa_call_summary *info)
+ class ipa_call_summary *srcinfo,
+ class ipa_call_summary *info)
{
new (info) ipa_call_summary (*srcinfo);
info->predicate = NULL;
@@ -765,12 +765,12 @@ ipa_call_summary_t::duplicate (struct cgraph_edge *src,
static void
dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
- struct ipa_fn_summary *info)
+ class ipa_fn_summary *info)
{
struct cgraph_edge *edge;
for (edge = node->callees; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
int i;
@@ -821,7 +821,7 @@ dump_ipa_call_summary (FILE *f, int indent, struct cgraph_node *node,
}
for (edge = node->indirect_calls; edge; edge = edge->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
fprintf (f, "%*sindirect call loop depth:%2i freq:%4.2f size:%2i"
" time: %2i",
indent, "",
@@ -844,7 +844,7 @@ ipa_dump_fn_summary (FILE *f, struct cgraph_node *node)
{
if (node->definition)
{
- struct ipa_fn_summary *s = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *s = ipa_fn_summaries->get (node);
if (s != NULL)
{
size_time_entry *e;
@@ -1183,7 +1183,7 @@ eliminated_by_inlining_prob (ipa_func_body_info *fbi, gimple *stmt)
static void
set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
basic_block bb)
{
gimple *last;
@@ -1268,7 +1268,7 @@ set_cond_stmt_execution_predicate (struct ipa_func_body_info *fbi,
static void
set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
basic_block bb)
{
gimple *lastg;
@@ -1322,8 +1322,8 @@ set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
unshare_expr_without_location (max));
p = p1 & p2;
}
- *(struct predicate *) e->aux
- = p.or_with (summary->conds, *(struct predicate *) e->aux);
+ *(class predicate *) e->aux
+ = p.or_with (summary->conds, *(class predicate *) e->aux);
}
}
@@ -1334,7 +1334,7 @@ set_switch_stmt_execution_predicate (struct ipa_func_body_info *fbi,
static void
compute_bb_predicates (struct ipa_func_body_info *fbi,
struct cgraph_node *node,
- struct ipa_fn_summary *summary)
+ class ipa_fn_summary *summary)
{
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
bool done = false;
@@ -1368,7 +1368,7 @@ compute_bb_predicates (struct ipa_func_body_info *fbi,
predicate this_bb_predicate
= *(predicate *) e->src->aux;
if (e->aux)
- this_bb_predicate &= (*(struct predicate *) e->aux);
+ this_bb_predicate &= (*(class predicate *) e->aux);
p = p.or_with (summary->conds, this_bb_predicate);
if (p == true)
break;
@@ -1407,7 +1407,7 @@ compute_bb_predicates (struct ipa_func_body_info *fbi,
static predicate
will_be_nonconstant_expr_predicate (ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
tree expr,
vec<predicate> nonconstant_names)
{
@@ -1478,7 +1478,7 @@ will_be_nonconstant_expr_predicate (ipa_func_body_info *fbi,
static predicate
will_be_nonconstant_predicate (struct ipa_func_body_info *fbi,
- struct ipa_fn_summary *summary,
+ class ipa_fn_summary *summary,
gimple *stmt,
vec<predicate> nonconstant_names)
{
@@ -1586,7 +1586,7 @@ struct record_modified_bb_info
static basic_block
get_minimal_bb (basic_block init_bb, basic_block use_bb)
{
- struct loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
+ class loop *l = find_common_loop (init_bb->loop_father, use_bb->loop_father);
if (l && l->header->count < init_bb->count)
return l->header;
return init_bb;
@@ -1797,7 +1797,7 @@ phi_result_unknown_predicate (ipa_func_body_info *fbi,
NONCONSTANT_NAMES, if possible. */
static void
-predicate_for_phi_result (struct ipa_fn_summary *summary, gphi *phi,
+predicate_for_phi_result (class ipa_fn_summary *summary, gphi *phi,
predicate *p,
vec<predicate> nonconstant_names)
{
@@ -1995,7 +1995,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
basic_block bb;
struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
sreal freq;
- struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
predicate bb_predicate;
struct ipa_func_body_info fbi;
vec<predicate> nonconstant_names = vNULL;
@@ -2236,7 +2236,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
if (prob == 2 && dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "\t\tWill be eliminated by inlining\n");
- struct predicate p = bb_predicate & will_be_nonconstant;
+ class predicate p = bb_predicate & will_be_nonconstant;
/* We can ignore a statement when we proved it is never going
to happen, but we cannot do that for call statements
@@ -2285,7 +2285,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
if (nonconstant_names.exists () && !early)
{
- struct loop *loop;
+ class loop *loop;
predicate loop_iterations = true;
predicate loop_stride = true;
@@ -2297,7 +2297,7 @@ analyze_function_body (struct cgraph_node *node, bool early)
vec<edge> exits;
edge ex;
unsigned int j;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
bb_predicate = *(predicate *) loop->header->aux;
exits = get_loop_exit_edges (loop);
@@ -2413,7 +2413,7 @@ compute_fn_summary (struct cgraph_node *node, bool early)
{
HOST_WIDE_INT self_stack_size;
struct cgraph_edge *e;
- struct ipa_fn_summary *info;
+ class ipa_fn_summary *info;
gcc_assert (!node->global.inlined_to);
@@ -2539,7 +2539,7 @@ estimate_edge_devirt_benefit (struct cgraph_edge *ie,
{
tree target;
struct cgraph_node *callee;
- struct ipa_fn_summary *isummary;
+ class ipa_fn_summary *isummary;
enum availability avail;
bool speculative;
@@ -2587,7 +2587,7 @@ estimate_edge_size_and_time (struct cgraph_edge *e, int *size, int *min_size,
vec<ipa_agg_jump_function_p> known_aggs,
ipa_hints *hints)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
int call_size = es->call_stmt_size;
int call_time = es->call_stmt_time;
int cur_size;
@@ -2624,7 +2624,7 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
struct cgraph_edge *e;
for (e = node->callees; e; e = e->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (e);
/* Do not care about zero sized builtins. */
if (e->inline_failed && !es->call_stmt_size)
@@ -2655,7 +2655,7 @@ estimate_calls_size_and_time (struct cgraph_node *node, int *size,
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
- struct ipa_call_summary *es = ipa_call_summaries->get_create (e);
+ class ipa_call_summary *es = ipa_call_summaries->get_create (e);
if (!es->predicate
|| es->predicate->evaluate (possible_truths))
estimate_edge_size_and_time (e, size,
@@ -2690,7 +2690,7 @@ estimate_node_size_and_time (struct cgraph_node *node,
vec<inline_param_summary>
inline_param_summary)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int size = 0;
sreal time = 0;
@@ -2881,9 +2881,9 @@ remap_edge_change_prob (struct cgraph_edge *inlined_edge,
if (ipa_node_params_sum)
{
int i;
- struct ipa_edge_args *args = IPA_EDGE_REF (edge);
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
- struct ipa_call_summary *inlined_es
+ class ipa_edge_args *args = IPA_EDGE_REF (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *inlined_es
= ipa_call_summaries->get (inlined_edge);
if (es->param.length () == 0)
@@ -2924,8 +2924,8 @@ remap_edge_change_prob (struct cgraph_edge *inlined_edge,
static void
remap_edge_summaries (struct cgraph_edge *inlined_edge,
struct cgraph_node *node,
- struct ipa_fn_summary *info,
- struct ipa_fn_summary *callee_info,
+ class ipa_fn_summary *info,
+ class ipa_fn_summary *callee_info,
vec<int> operand_map,
vec<int> offset_map,
clause_t possible_truths,
@@ -2934,7 +2934,7 @@ remap_edge_summaries (struct cgraph_edge *inlined_edge,
struct cgraph_edge *e, *next;
for (e = node->callees; e; e = next)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
predicate p;
next = e->next_callee;
@@ -2960,7 +2960,7 @@ remap_edge_summaries (struct cgraph_edge *inlined_edge,
}
for (e = node->indirect_calls; e; e = next)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
predicate p;
next = e->next_callee;
@@ -2980,8 +2980,8 @@ remap_edge_summaries (struct cgraph_edge *inlined_edge,
/* Same as remap_predicate, but set result into hint *HINT. */
static void
-remap_hint_predicate (struct ipa_fn_summary *info,
- struct ipa_fn_summary *callee_info,
+remap_hint_predicate (class ipa_fn_summary *info,
+ class ipa_fn_summary *callee_info,
predicate **hint,
vec<int> operand_map,
vec<int> offset_map,
@@ -3013,7 +3013,7 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
ipa_fn_summary *callee_info = ipa_fn_summaries->get (edge->callee);
struct cgraph_node *to = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to : edge->caller);
- struct ipa_fn_summary *info = ipa_fn_summaries->get (to);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (to);
clause_t clause = 0; /* not_inline is known to be false. */
size_time_entry *e;
vec<int> operand_map = vNULL;
@@ -3021,7 +3021,7 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
int i;
predicate toplev_predicate;
predicate true_p = true;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
if (es->predicate)
toplev_predicate = *es->predicate;
@@ -3034,7 +3034,7 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
evaluate_properties_for_edge (edge, true, &clause, NULL, NULL, NULL, NULL);
if (ipa_node_params_sum && callee_info->conds)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (edge);
+ class ipa_edge_args *args = IPA_EDGE_REF (edge);
int count = ipa_get_cs_argument_count (args);
int i;
@@ -3127,7 +3127,7 @@ ipa_merge_fn_summary_after_inlining (struct cgraph_edge *edge)
void
ipa_update_overall_fn_summary (struct cgraph_node *node)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get_create (node);
size_time_entry *e;
int i;
@@ -3223,10 +3223,10 @@ ipa_fn_summary_generate (void)
/* Read inline summary for edge E from IB. */
static void
-read_ipa_call_summary (struct lto_input_block *ib, struct cgraph_edge *e,
+read_ipa_call_summary (class lto_input_block *ib, struct cgraph_edge *e,
bool prevails)
{
- struct ipa_call_summary *es = prevails
+ class ipa_call_summary *es = prevails
? ipa_call_summaries->get_create (e) : NULL;
predicate p;
int length, i;
@@ -3277,7 +3277,7 @@ inline_read_section (struct lto_file_decl_data *file_data, const char *data,
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i, count2, j;
unsigned int f_count;
@@ -3292,7 +3292,7 @@ inline_read_section (struct lto_file_decl_data *file_data, const char *data,
{
unsigned int index;
struct cgraph_node *node;
- struct ipa_fn_summary *info;
+ class ipa_fn_summary *info;
lto_symtab_encoder_t encoder;
struct bitpack_d bp;
struct cgraph_edge *e;
@@ -3349,7 +3349,7 @@ inline_read_section (struct lto_file_decl_data *file_data, const char *data,
gcc_assert (!info || !info->size_time_table);
for (j = 0; j < count2; j++)
{
- struct size_time_entry e;
+ class size_time_entry e;
e.size = streamer_read_uhwi (&ib);
e.time = sreal::stream_in (&ib);
@@ -3423,7 +3423,7 @@ ipa_fn_summary_read (void)
static void
write_ipa_call_summary (struct output_block *ob, struct cgraph_edge *e)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (e);
+ class ipa_call_summary *es = ipa_call_summaries->get (e);
int i;
streamer_write_uhwi (ob, es->call_stmt_size);
@@ -3471,7 +3471,7 @@ ipa_fn_summary_write (void)
cgraph_node *cnode = dyn_cast <cgraph_node *> (snode);
if (cnode && cnode->definition && !cnode->alias)
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (cnode);
struct bitpack_d bp;
struct cgraph_edge *edge;
int i;
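
The ipa-fnsummary.c hunks above also touch keys that sit inside casts and inside the sizeof operands of allocation macros; those positions are elaborated-type-specifiers as well, so the same consistency rule applies. A rough stand-alone sketch of that shape, with plain std::calloc standing in for a zero-initializing allocation macro and predicate_like a made-up type:

#include <cstdlib>

class predicate_like
{
public:
  bool value;
};

void *
make_aux (void)
{
  /* roughly what an XCNEWVEC-style zeroed allocation boils down to; the key
     in the sizeof operand names the type, it does not redeclare it */
  return std::calloc (1, sizeof (class predicate_like));
}

bool
aux_value (void *aux)
{
  /* the cast spells the type with the same key as its definition above */
  return (*(class predicate_like *) aux).value;
}
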
diff --git a/gcc/ipa-fnsummary.h b/gcc/ipa-fnsummary.h
index 7b077042b8e..55bc34146aa 100644
--- a/gcc/ipa-fnsummary.h
+++ b/gcc/ipa-fnsummary.h
@@ -184,7 +184,7 @@ public:
static ipa_fn_summary_t *create_ggc (symbol_table *symtab)
{
- struct ipa_fn_summary_t *summary = new (ggc_alloc <ipa_fn_summary_t> ())
+ class ipa_fn_summary_t *summary = new (ggc_alloc <ipa_fn_summary_t> ())
ipa_fn_summary_t (symtab);
summary->disable_insertion_hook ();
return summary;
diff --git a/gcc/ipa-hsa.c b/gcc/ipa-hsa.c
index b48f94ba496..8af1d734d85 100644
--- a/gcc/ipa-hsa.c
+++ b/gcc/ipa-hsa.c
@@ -221,7 +221,7 @@ ipa_hsa_read_section (struct lto_file_decl_data *file_data, const char *data,
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
diff --git a/gcc/ipa-icf-gimple.c b/gcc/ipa-icf-gimple.c
index 0713e125898..4060c0e8eb3 100644
--- a/gcc/ipa-icf-gimple.c
+++ b/gcc/ipa-icf-gimple.c
@@ -614,8 +614,8 @@ func_checker::compare_loops (basic_block bb1, basic_block bb2)
if ((bb1->loop_father == NULL) != (bb2->loop_father == NULL))
return return_false ();
- struct loop *l1 = bb1->loop_father;
- struct loop *l2 = bb2->loop_father;
+ class loop *l1 = bb1->loop_father;
+ class loop *l2 = bb2->loop_father;
if (l1 == NULL)
return true;
diff --git a/gcc/ipa-icf.c b/gcc/ipa-icf.c
index 7c486eda758..2174fb7494c 100644
--- a/gcc/ipa-icf.c
+++ b/gcc/ipa-icf.c
@@ -482,7 +482,7 @@ sem_function::param_used_p (unsigned int i)
if (ipa_node_params_sum == NULL)
return true;
- struct ipa_node_params *parms_info = IPA_NODE_REF (get_node ());
+ class ipa_node_params *parms_info = IPA_NODE_REF (get_node ());
if (vec_safe_length (parms_info->descriptors) <= i)
return true;
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 6c77f328db0..a66af277d03 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -128,7 +128,7 @@ do_estimate_edge_time (struct cgraph_edge *edge)
vec<tree> known_vals;
vec<ipa_polymorphic_call_context> known_contexts;
vec<ipa_agg_jump_function_p> known_aggs;
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
int min_size;
callee = edge->callee->ultimate_alias_target ();
@@ -264,7 +264,7 @@ int
estimate_size_after_inlining (struct cgraph_node *node,
struct cgraph_edge *edge)
{
- struct ipa_call_summary *es = ipa_call_summaries->get (edge);
+ class ipa_call_summary *es = ipa_call_summaries->get (edge);
ipa_fn_summary *s = ipa_fn_summaries->get (node);
if (!es->predicate || *es->predicate != false)
{
@@ -321,7 +321,7 @@ int
estimate_growth (struct cgraph_node *node)
{
struct growth_data d = { node, false, false, 0 };
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (node);
node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true);
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 3475258b922..939d86ef94a 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -1037,7 +1037,7 @@ edge_badness (struct cgraph_edge *edge, bool dump)
int growth;
sreal edge_time, unspec_edge_time;
struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
- struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ class ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
ipa_hints hints;
cgraph_node *caller = (edge->caller->global.inlined_to
? edge->caller->global.inlined_to
@@ -1799,7 +1799,7 @@ inline_small_functions (void)
&& (node->has_gimple_body_p () || node->thunk.thunk_p)
&& opt_for_fn (node->decl, optimize))
{
- struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
+ class ipa_fn_summary *info = ipa_fn_summaries->get (node);
struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
/* Do not account external functions, they will be optimized out
diff --git a/gcc/ipa-polymorphic-call.c b/gcc/ipa-polymorphic-call.c
index 75d8ebc1c44..705af03d20c 100644
--- a/gcc/ipa-polymorphic-call.c
+++ b/gcc/ipa-polymorphic-call.c
@@ -688,8 +688,8 @@ ipa_polymorphic_call_context::stream_out (struct output_block *ob) const
/* Stream in the context from IB and DATA_IN. */
void
-ipa_polymorphic_call_context::stream_in (struct lto_input_block *ib,
- struct data_in *data_in)
+ipa_polymorphic_call_context::stream_in (class lto_input_block *ib,
+ class data_in *data_in)
{
struct bitpack_d bp = streamer_read_bitpack (ib);
diff --git a/gcc/ipa-predicate.c b/gcc/ipa-predicate.c
index 3b7c4dfb1dd..49622e9cd33 100644
--- a/gcc/ipa-predicate.c
+++ b/gcc/ipa-predicate.c
@@ -398,8 +398,8 @@ predicate::remap_after_duplication (clause_t possible_truths)
for other purposes). */
predicate
-predicate::remap_after_inlining (struct ipa_fn_summary *info,
- struct ipa_fn_summary *callee_info,
+predicate::remap_after_inlining (class ipa_fn_summary *info,
+ class ipa_fn_summary *callee_info,
vec<int> operand_map,
vec<int> offset_map,
clause_t possible_truths,
@@ -483,7 +483,7 @@ predicate::remap_after_inlining (struct ipa_fn_summary *info,
/* Read predicate from IB. */
void
-predicate::stream_in (struct lto_input_block *ib)
+predicate::stream_in (class lto_input_block *ib)
{
clause_t clause;
int k = 0;
@@ -522,7 +522,7 @@ predicate::stream_out (struct output_block *ob)
It can be NULL, which means this is not a load from an aggregate. */
predicate
-add_condition (struct ipa_fn_summary *summary, int operand_num,
+add_condition (class ipa_fn_summary *summary, int operand_num,
HOST_WIDE_INT size, struct agg_position_info *aggpos,
enum tree_code code, tree val)
{
diff --git a/gcc/ipa-predicate.h b/gcc/ipa-predicate.h
index e97754cde77..c2adba30551 100644
--- a/gcc/ipa-predicate.h
+++ b/gcc/ipa-predicate.h
@@ -205,11 +205,11 @@ public:
predicate remap_after_duplication (clause_t);
/* Return predicate equal to THIS after inlining. */
- predicate remap_after_inlining (struct ipa_fn_summary *,
- struct ipa_fn_summary *,
+ predicate remap_after_inlining (class ipa_fn_summary *,
+ class ipa_fn_summary *,
vec<int>, vec<int>, clause_t, const predicate &);
- void stream_in (struct lto_input_block *);
+ void stream_in (class lto_input_block *);
void stream_out (struct output_block *);
private:
@@ -227,6 +227,6 @@ private:
};
void dump_condition (FILE *f, conditions conditions, int cond);
-predicate add_condition (struct ipa_fn_summary *summary, int operand_num,
+predicate add_condition (class ipa_fn_summary *summary, int operand_num,
HOST_WIDE_INT size, struct agg_position_info *aggpos,
enum tree_code code, tree val);
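
For the ipa-predicate.h hunks just above, the declarations in the header follow the keys that the matching definitions in ipa-predicate.c now use, so each parameter type is spelled the same way in both places. A small self-contained sketch with made-up names (input_block_like, reader_like) standing in for the real classes:

class input_block_like;                 /* forward declaration uses `class' */

struct reader_like
{
  void stream_in (class input_block_like *ib);   /* declaration, header side */
};

class input_block_like
{
public:
  int pos;
};

void
reader_like::stream_in (class input_block_like *ib)  /* definition uses the
                                                        same key as the
                                                        declaration above */
{
  ib->pos = 0;
}
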
diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index c80ea7a9b95..1fb939b73d0 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -258,7 +258,7 @@ ipa_profile_read_summary (void)
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data,
LTO_section_ipa_profile,
&data, &len);
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index f5f8c753631..344b78ea02f 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -203,7 +203,7 @@ ipa_get_param_decl_index_1 (vec<ipa_param_descriptor, va_gc> *descriptors,
to INFO. */
int
-ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree)
+ipa_get_param_decl_index (class ipa_node_params *info, tree ptree)
{
return ipa_get_param_decl_index_1 (info->descriptors, ptree);
}
@@ -253,7 +253,7 @@ count_formal_params (tree fndecl)
using ipa_initialize_node_params. */
void
-ipa_dump_param (FILE *file, struct ipa_node_params *info, int i)
+ipa_dump_param (FILE *file, class ipa_node_params *info, int i)
{
fprintf (file, "param #%i", i);
if ((*info->descriptors)[i].decl_or_type)
@@ -269,7 +269,7 @@ ipa_dump_param (FILE *file, struct ipa_node_params *info, int i)
static bool
ipa_alloc_node_params (struct cgraph_node *node, int param_count)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (!info->descriptors && param_count)
{
@@ -287,7 +287,7 @@ ipa_alloc_node_params (struct cgraph_node *node, int param_count)
void
ipa_initialize_node_params (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
if (!info->descriptors
&& ipa_alloc_node_params (node, count_formal_params (node->decl)))
@@ -375,7 +375,7 @@ ipa_print_node_jump_functions_for_edge (FILE *f, struct cgraph_edge *cs)
}
}
- struct ipa_polymorphic_call_context *ctx
+ class ipa_polymorphic_call_context *ctx
= ipa_get_ith_polymorhic_call_context (IPA_EDGE_REF (cs), i);
if (ctx && !ctx->useless_p ())
{
@@ -432,7 +432,7 @@ ipa_print_node_jump_functions (FILE *f, struct cgraph_node *node)
for (cs = node->indirect_calls; cs; cs = cs->next_callee)
{
- struct cgraph_indirect_call_info *ii;
+ class cgraph_indirect_call_info *ii;
if (!ipa_edge_args_info_available_for_edge_p (cs))
continue;
@@ -1190,7 +1190,7 @@ ipa_load_from_parm_agg (struct ipa_func_body_info *fbi,
static void
compute_complex_assign_jump_func (struct ipa_func_body_info *fbi,
- struct ipa_node_params *info,
+ class ipa_node_params *info,
struct ipa_jump_func *jfunc,
gcall *call, gimple *stmt, tree name,
tree param_type)
@@ -1346,7 +1346,7 @@ get_ancestor_addr_info (gimple *assign, tree *obj_p, HOST_WIDE_INT *offset)
static void
compute_complex_ancestor_jump_func (struct ipa_func_body_info *fbi,
- struct ipa_node_params *info,
+ class ipa_node_params *info,
struct ipa_jump_func *jfunc,
gcall *call, gphi *phi)
{
@@ -1855,8 +1855,8 @@ static void
ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
struct cgraph_edge *cs)
{
- struct ipa_node_params *info = IPA_NODE_REF (cs->caller);
- struct ipa_edge_args *args = IPA_EDGE_REF (cs);
+ class ipa_node_params *info = IPA_NODE_REF (cs->caller);
+ class ipa_edge_args *args = IPA_EDGE_REF (cs);
gcall *call = cs->call_stmt;
int n, arg_num = gimple_call_num_args (call);
bool useful_context = false;
@@ -1880,7 +1880,7 @@ ipa_compute_jump_functions_for_edge (struct ipa_func_body_info *fbi,
if (flag_devirtualize && POINTER_TYPE_P (TREE_TYPE (arg)))
{
tree instance;
- struct ipa_polymorphic_call_context context (cs->caller->decl,
+ class ipa_polymorphic_call_context context (cs->caller->decl,
arg, cs->call_stmt,
&instance);
context.get_dynamic_type (instance, arg, NULL, cs->call_stmt,
@@ -2197,7 +2197,7 @@ static void
ipa_analyze_indirect_call_uses (struct ipa_func_body_info *fbi, gcall *call,
tree target)
{
- struct ipa_node_params *info = fbi->info;
+ class ipa_node_params *info = fbi->info;
HOST_WIDE_INT offset;
bool by_ref;
@@ -2348,7 +2348,7 @@ ipa_analyze_virtual_call_uses (struct ipa_func_body_info *fbi,
if (TREE_CODE (obj) != SSA_NAME)
return;
- struct ipa_node_params *info = fbi->info;
+ class ipa_node_params *info = fbi->info;
if (SSA_NAME_IS_DEFAULT_DEF (obj))
{
struct ipa_jump_func jfunc;
@@ -2380,7 +2380,7 @@ ipa_analyze_virtual_call_uses (struct ipa_func_body_info *fbi,
}
struct cgraph_edge *cs = ipa_note_param_call (fbi->node, index, call);
- struct cgraph_indirect_call_info *ii = cs->indirect_info;
+ class cgraph_indirect_call_info *ii = cs->indirect_info;
ii->offset = anc_offset;
ii->otr_token = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target));
ii->otr_type = obj_type_ref_class (target);
@@ -2452,7 +2452,7 @@ ipa_analyze_stmt_uses (struct ipa_func_body_info *fbi, gimple *stmt)
static bool
visit_ref_for_mod_analysis (gimple *, tree op, tree, void *data)
{
- struct ipa_node_params *info = (struct ipa_node_params *) data;
+ class ipa_node_params *info = (class ipa_node_params *) data;
op = get_base_address (op);
if (op
@@ -2500,7 +2500,7 @@ ipa_analyze_params_uses_in_bb (struct ipa_func_body_info *fbi, basic_block bb)
static void
ipa_analyze_controlled_uses (struct cgraph_node *node)
{
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
for (int i = 0; i < ipa_get_param_count (info); i++)
{
@@ -2592,7 +2592,7 @@ void
ipa_analyze_node (struct cgraph_node *node)
{
struct ipa_func_body_info fbi;
- struct ipa_node_params *info;
+ class ipa_node_params *info;
ipa_check_create_node_params ();
ipa_check_create_edge_args ();
@@ -2652,22 +2652,22 @@ static void
update_jump_functions_after_inlining (struct cgraph_edge *cs,
struct cgraph_edge *e)
{
- struct ipa_edge_args *top = IPA_EDGE_REF (cs);
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *top = IPA_EDGE_REF (cs);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
int count = ipa_get_cs_argument_count (args);
int i;
for (i = 0; i < count; i++)
{
struct ipa_jump_func *dst = ipa_get_ith_jump_func (args, i);
- struct ipa_polymorphic_call_context *dst_ctx
+ class ipa_polymorphic_call_context *dst_ctx
= ipa_get_ith_polymorhic_call_context (args, i);
if (dst->type == IPA_JF_ANCESTOR)
{
struct ipa_jump_func *src;
int dst_fid = dst->value.ancestor.formal_id;
- struct ipa_polymorphic_call_context *src_ctx
+ class ipa_polymorphic_call_context *src_ctx
= ipa_get_ith_polymorhic_call_context (top, dst_fid);
/* Variable number of arguments can cause havoc if we try to access
@@ -2683,7 +2683,7 @@ update_jump_functions_after_inlining (struct cgraph_edge *cs,
if (src_ctx && !src_ctx->useless_p ())
{
- struct ipa_polymorphic_call_context ctx = *src_ctx;
+ class ipa_polymorphic_call_context ctx = *src_ctx;
/* TODO: Make type preserved safe WRT contexts. */
if (!ipa_get_jf_ancestor_type_preserved (dst))
@@ -2753,12 +2753,12 @@ update_jump_functions_after_inlining (struct cgraph_edge *cs,
int dst_fid = dst->value.pass_through.formal_id;
src = ipa_get_ith_jump_func (top, dst_fid);
bool dst_agg_p = ipa_get_jf_pass_through_agg_preserved (dst);
- struct ipa_polymorphic_call_context *src_ctx
+ class ipa_polymorphic_call_context *src_ctx
= ipa_get_ith_polymorhic_call_context (top, dst_fid);
if (src_ctx && !src_ctx->useless_p ())
{
- struct ipa_polymorphic_call_context ctx = *src_ctx;
+ class ipa_polymorphic_call_context ctx = *src_ctx;
/* TODO: Make type preserved safe WRT contexts. */
if (!ipa_get_jf_pass_through_type_preserved (dst))
@@ -3231,7 +3231,7 @@ try_decrement_rdesc_refcount (struct ipa_jump_func *jfunc)
static struct cgraph_edge *
try_make_edge_direct_simple_call (struct cgraph_edge *ie,
struct ipa_jump_func *jfunc, tree target_type,
- struct ipa_node_params *new_root_info)
+ class ipa_node_params *new_root_info)
{
struct cgraph_edge *cs;
tree target;
@@ -3302,7 +3302,7 @@ ipa_impossible_devirt_target (struct cgraph_edge *ie, tree target)
static struct cgraph_edge *
try_make_edge_direct_virtual_call (struct cgraph_edge *ie,
struct ipa_jump_func *jfunc,
- struct ipa_polymorphic_call_context ctx)
+ class ipa_polymorphic_call_context ctx)
{
tree target = NULL;
bool speculative = false;
@@ -3412,9 +3412,9 @@ update_indirect_edges_after_inlining (struct cgraph_edge *cs,
struct cgraph_node *node,
vec<cgraph_edge *> *new_edges)
{
- struct ipa_edge_args *top;
+ class ipa_edge_args *top;
struct cgraph_edge *ie, *next_ie, *new_direct_edge;
- struct ipa_node_params *new_root_info, *inlined_node_info;
+ class ipa_node_params *new_root_info, *inlined_node_info;
bool res = false;
ipa_check_create_edge_args ();
@@ -3426,7 +3426,7 @@ update_indirect_edges_after_inlining (struct cgraph_edge *cs,
for (ie = node->indirect_calls; ie; ie = next_ie)
{
- struct cgraph_indirect_call_info *ici = ie->indirect_info;
+ class cgraph_indirect_call_info *ici = ie->indirect_info;
struct ipa_jump_func *jfunc;
int param_index;
cgraph_node *spec_target = NULL;
@@ -3583,11 +3583,11 @@ combine_controlled_uses_counters (int c, int d)
static void
propagate_controlled_uses (struct cgraph_edge *cs)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (cs);
+ class ipa_edge_args *args = IPA_EDGE_REF (cs);
struct cgraph_node *new_root = cs->caller->global.inlined_to
? cs->caller->global.inlined_to : cs->caller;
- struct ipa_node_params *new_root_info = IPA_NODE_REF (new_root);
- struct ipa_node_params *old_root_info = IPA_NODE_REF (cs->callee);
+ class ipa_node_params *new_root_info = IPA_NODE_REF (new_root);
+ class ipa_node_params *old_root_info = IPA_NODE_REF (cs->callee);
int count, i;
count = MIN (ipa_get_cs_argument_count (args),
@@ -3881,7 +3881,7 @@ ipa_edge_args_sum_t::duplicate (cgraph_edge *src, cgraph_edge *dst,
{
struct cgraph_node *inline_root = dst->caller->global.inlined_to
? dst->caller->global.inlined_to : dst->caller;
- struct ipa_node_params *root_info = IPA_NODE_REF (inline_root);
+ class ipa_node_params *root_info = IPA_NODE_REF (inline_root);
int idx = ipa_get_jf_pass_through_formal_id (dst_jf);
int c = ipa_get_controlled_uses (root_info, idx);
@@ -4025,7 +4025,7 @@ void
ipa_print_node_params (FILE *f, struct cgraph_node *node)
{
int i, count;
- struct ipa_node_params *info;
+ class ipa_node_params *info;
if (!node->definition)
return;
@@ -4173,10 +4173,10 @@ ipa_write_jump_function (struct output_block *ob,
/* Read in jump function JUMP_FUNC from IB. */
static void
-ipa_read_jump_function (struct lto_input_block *ib,
+ipa_read_jump_function (class lto_input_block *ib,
struct ipa_jump_func *jump_func,
struct cgraph_edge *cs,
- struct data_in *data_in,
+ class data_in *data_in,
bool prevails)
{
enum jump_func_type jftype;
@@ -4285,7 +4285,7 @@ static void
ipa_write_indirect_edge_info (struct output_block *ob,
struct cgraph_edge *cs)
{
- struct cgraph_indirect_call_info *ii = cs->indirect_info;
+ class cgraph_indirect_call_info *ii = cs->indirect_info;
struct bitpack_d bp;
streamer_write_hwi (ob, ii->param_index);
@@ -4314,11 +4314,11 @@ ipa_write_indirect_edge_info (struct output_block *ob,
relevant to indirect inlining from IB. */
static void
-ipa_read_indirect_edge_info (struct lto_input_block *ib,
- struct data_in *data_in,
+ipa_read_indirect_edge_info (class lto_input_block *ib,
+ class data_in *data_in,
struct cgraph_edge *cs)
{
- struct cgraph_indirect_call_info *ii = cs->indirect_info;
+ class cgraph_indirect_call_info *ii = cs->indirect_info;
struct bitpack_d bp;
ii->param_index = (int) streamer_read_hwi (ib);
@@ -4348,7 +4348,7 @@ ipa_write_node_info (struct output_block *ob, struct cgraph_node *node)
{
int node_ref;
lto_symtab_encoder_t encoder;
- struct ipa_node_params *info = IPA_NODE_REF (node);
+ class ipa_node_params *info = IPA_NODE_REF (node);
int j;
struct cgraph_edge *e;
struct bitpack_d bp;
@@ -4375,7 +4375,7 @@ ipa_write_node_info (struct output_block *ob, struct cgraph_node *node)
}
for (e = node->callees; e; e = e->next_callee)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
streamer_write_uhwi (ob,
ipa_get_cs_argument_count (args) * 2
@@ -4389,7 +4389,7 @@ ipa_write_node_info (struct output_block *ob, struct cgraph_node *node)
}
for (e = node->indirect_calls; e; e = e->next_callee)
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
streamer_write_uhwi (ob,
ipa_get_cs_argument_count (args) * 2
@@ -4407,8 +4407,8 @@ ipa_write_node_info (struct output_block *ob, struct cgraph_node *node)
/* Stream in edge E from IB. */
static void
-ipa_read_edge_info (struct lto_input_block *ib,
- struct data_in *data_in,
+ipa_read_edge_info (class lto_input_block *ib,
+ class data_in *data_in,
struct cgraph_edge *e, bool prevails)
{
int count = streamer_read_uhwi (ib);
@@ -4419,7 +4419,7 @@ ipa_read_edge_info (struct lto_input_block *ib,
return;
if (prevails && e->possibly_call_in_translation_unit_p ())
{
- struct ipa_edge_args *args = IPA_EDGE_REF (e);
+ class ipa_edge_args *args = IPA_EDGE_REF (e);
vec_safe_grow_cleared (args->jump_functions, count);
if (contexts_computed)
vec_safe_grow_cleared (args->polymorphic_call_contexts, count);
@@ -4441,7 +4441,7 @@ ipa_read_edge_info (struct lto_input_block *ib,
data_in, prevails);
if (contexts_computed)
{
- struct ipa_polymorphic_call_context ctx;
+ class ipa_polymorphic_call_context ctx;
ctx.stream_in (ib, data_in);
}
}
@@ -4451,14 +4451,14 @@ ipa_read_edge_info (struct lto_input_block *ib,
/* Stream in NODE info from IB. */
static void
-ipa_read_node_info (struct lto_input_block *ib, struct cgraph_node *node,
- struct data_in *data_in)
+ipa_read_node_info (class lto_input_block *ib, struct cgraph_node *node,
+ class data_in *data_in)
{
int k;
struct cgraph_edge *e;
struct bitpack_d bp;
bool prevails = node->prevailing_p ();
- struct ipa_node_params *info = prevails ? IPA_NODE_REF (node) : NULL;
+ class ipa_node_params *info = prevails ? IPA_NODE_REF (node) : NULL;
int param_count = streamer_read_uhwi (ib);
if (prevails)
@@ -4555,7 +4555,7 @@ ipa_prop_read_section (struct lto_file_decl_data *file_data, const char *data,
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
@@ -4803,7 +4803,7 @@ read_replacements_section (struct lto_file_decl_data *file_data,
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
diff --git a/gcc/ipa-prop.h b/gcc/ipa-prop.h
index 7605911f701..6470c938ecd 100644
--- a/gcc/ipa-prop.h
+++ b/gcc/ipa-prop.h
@@ -179,12 +179,12 @@ struct GTY (()) ipa_jump_func
/* Information about zero/non-zero bits. The pointed to structure is shared
between different jump functions. Use ipa_set_jfunc_bits to set this
field. */
- struct ipa_bits *bits;
+ class ipa_bits *bits;
/* Information about value range, containing valid data only when vr_known is
true. The pointed to structure is shared between different jump
functions. Use ipa_set_jfunc_vr to set this field. */
- struct value_range_base *m_vr;
+ class value_range_base *m_vr;
enum jump_func_type type;
/* Represents a value of a jump function. pass_through is used only in jump
@@ -335,7 +335,7 @@ public:
vec<ipa_param_descriptor, va_gc> *descriptors;
/* Pointer to an array of structures describing individual formal
parameters. */
- struct ipcp_param_lattices * GTY((skip)) lattices;
+ class ipcp_param_lattices * GTY((skip)) lattices;
/* Only for versioned nodes this field would not be NULL,
it points to the node that IPA cp cloned from. */
struct cgraph_node * GTY((skip)) ipcp_orig_node;
@@ -423,7 +423,7 @@ struct ipa_func_body_info
cgraph_node *node;
/* Its info. */
- struct ipa_node_params *info;
+ class ipa_node_params *info;
/* Information about individual BBs. */
vec<ipa_bb_info> bb_infos;
@@ -442,7 +442,7 @@ struct ipa_func_body_info
/* Return the number of formal parameters. */
static inline int
-ipa_get_param_count (struct ipa_node_params *info)
+ipa_get_param_count (class ipa_node_params *info)
{
return vec_safe_length (info->descriptors);
}
@@ -453,7 +453,7 @@ ipa_get_param_count (struct ipa_node_params *info)
WPA. */
static inline tree
-ipa_get_param (struct ipa_node_params *info, int i)
+ipa_get_param (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
gcc_checking_assert (!flag_wpa);
@@ -466,7 +466,7 @@ ipa_get_param (struct ipa_node_params *info, int i)
to INFO if it is known or NULL if not. */
static inline tree
-ipa_get_type (struct ipa_node_params *info, int i)
+ipa_get_type (class ipa_node_params *info, int i)
{
if (vec_safe_length (info->descriptors) <= (unsigned) i)
return NULL;
@@ -483,7 +483,7 @@ ipa_get_type (struct ipa_node_params *info, int i)
to INFO. */
static inline int
-ipa_get_param_move_cost (struct ipa_node_params *info, int i)
+ipa_get_param_move_cost (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
return (*info->descriptors)[i].move_cost;
@@ -493,7 +493,7 @@ ipa_get_param_move_cost (struct ipa_node_params *info, int i)
associated with INFO to VAL. */
static inline void
-ipa_set_param_used (struct ipa_node_params *info, int i, bool val)
+ipa_set_param_used (class ipa_node_params *info, int i, bool val)
{
gcc_checking_assert (info->descriptors);
(*info->descriptors)[i].used = val;
@@ -503,7 +503,7 @@ ipa_set_param_used (struct ipa_node_params *info, int i, bool val)
IPA_UNDESCRIBED_USE if there is a use that is not described by these
structures. */
static inline int
-ipa_get_controlled_uses (struct ipa_node_params *info, int i)
+ipa_get_controlled_uses (class ipa_node_params *info, int i)
{
/* FIXME: introducing speculation causes out of bounds access here. */
if (vec_safe_length (info->descriptors) > (unsigned)i)
@@ -514,7 +514,7 @@ ipa_get_controlled_uses (struct ipa_node_params *info, int i)
/* Set the controlled counter of a given parameter. */
static inline void
-ipa_set_controlled_uses (struct ipa_node_params *info, int i, int val)
+ipa_set_controlled_uses (class ipa_node_params *info, int i, int val)
{
gcc_checking_assert (info->descriptors);
(*info->descriptors)[i].controlled_uses = val;
@@ -524,7 +524,7 @@ ipa_set_controlled_uses (struct ipa_node_params *info, int i, int val)
function associated with INFO. */
static inline bool
-ipa_is_param_used (struct ipa_node_params *info, int i)
+ipa_is_param_used (class ipa_node_params *info, int i)
{
gcc_checking_assert (info->descriptors);
return (*info->descriptors)[i].used;
@@ -592,7 +592,7 @@ class GTY((for_user)) ipa_edge_args
/* Return the number of actual arguments. */
static inline int
-ipa_get_cs_argument_count (struct ipa_edge_args *args)
+ipa_get_cs_argument_count (class ipa_edge_args *args)
{
return vec_safe_length (args->jump_functions);
}
@@ -602,15 +602,15 @@ ipa_get_cs_argument_count (struct ipa_edge_args *args)
ipa_compute_jump_functions. */
static inline struct ipa_jump_func *
-ipa_get_ith_jump_func (struct ipa_edge_args *args, int i)
+ipa_get_ith_jump_func (class ipa_edge_args *args, int i)
{
return &(*args->jump_functions)[i];
}
/* Returns a pointer to the polymorphic call context for the ith argument.
NULL if contexts are not computed. */
-static inline struct ipa_polymorphic_call_context *
-ipa_get_ith_polymorhic_call_context (struct ipa_edge_args *args, int i)
+static inline class ipa_polymorphic_call_context *
+ipa_get_ith_polymorhic_call_context (class ipa_edge_args *args, int i)
{
if (!args->polymorphic_call_contexts)
return NULL;
@@ -781,11 +781,11 @@ extern object_allocator<ipcp_value<ipa_polymorphic_call_context> >
ipcp_poly_ctx_values_pool;
template <typename valtype>
-class ipcp_value_source;
+struct ipcp_value_source;
extern object_allocator<ipcp_value_source<tree> > ipcp_sources_pool;
-class ipcp_agg_lattice;
+struct ipcp_agg_lattice;
extern object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool;
@@ -795,15 +795,15 @@ void ipa_prop_write_jump_functions (void);
void ipa_prop_read_jump_functions (void);
void ipcp_write_transformation_summaries (void);
void ipcp_read_transformation_summaries (void);
-int ipa_get_param_decl_index (struct ipa_node_params *, tree);
-tree ipa_value_from_jfunc (struct ipa_node_params *info,
+int ipa_get_param_decl_index (class ipa_node_params *, tree);
+tree ipa_value_from_jfunc (class ipa_node_params *info,
struct ipa_jump_func *jfunc, tree type);
unsigned int ipcp_transform_function (struct cgraph_node *node);
ipa_polymorphic_call_context ipa_context_from_jfunc (ipa_node_params *,
cgraph_edge *,
int,
ipa_jump_func *);
-void ipa_dump_param (FILE *, struct ipa_node_params *info, int i);
+void ipa_dump_param (FILE *, class ipa_node_params *info, int i);
void ipa_release_body_info (struct ipa_func_body_info *);
tree ipa_get_callee_param_type (struct cgraph_edge *e, int i);
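The ipa-prop.h hunks above all make the same kind of change: the class-key written in a declaration or elaborated-type-specifier is switched so that it agrees with the key used at the type's definition, which is the disagreement that -Wmismatched-tags style diagnostics report. A minimal sketch of the pattern, using hypothetical type names rather than anything from the patch:

/* Mismatched keys: valid C++, but a compiler implementing
   -Wmismatched-tags reports the disagreement.  */
class widget;
struct widget { int x; };

/* Consistent keys: no warning.  */
struct gadget;
struct gadget { int x; };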
diff --git a/gcc/ipa-pure-const.c b/gcc/ipa-pure-const.c
index f5e53967df1..db91d2c1a32 100644
--- a/gcc/ipa-pure-const.c
+++ b/gcc/ipa-pure-const.c
@@ -122,7 +122,7 @@ public:
enum malloc_state_e malloc_state;
};
-typedef struct funct_state_d * funct_state;
+typedef class funct_state_d * funct_state;
/* The storage of the funct_state is abstracted because there is the
possibility that it may be desirable to move this to the cgraph
@@ -1014,7 +1014,7 @@ analyze_function (struct cgraph_node *fn, bool ipa)
funct_state l;
basic_block this_block;
- l = XCNEW (struct funct_state_d);
+ l = XCNEW (class funct_state_d);
l->pure_const_state = IPA_CONST;
l->state_previously_known = IPA_NEITHER;
l->looping_previously_known = true;
@@ -1086,7 +1086,7 @@ end:
}
else
{
- struct loop *loop;
+ class loop *loop;
scev_initialize ();
FOR_EACH_LOOP (loop, 0)
if (!finite_loop_p (loop))
@@ -1279,7 +1279,7 @@ pure_const_read_summary (void)
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data,
LTO_section_ipa_pure_const,
&data, &len);
diff --git a/gcc/ipa-ref.h b/gcc/ipa-ref.h
index 05e105851cc..0d8e509c932 100644
--- a/gcc/ipa-ref.h
+++ b/gcc/ipa-ref.h
@@ -22,8 +22,8 @@ along with GCC; see the file COPYING3. If not see
#define GCC_IPA_REF_H
struct cgraph_node;
-class varpool_node;
-class symtab_node;
+struct varpool_node;
+struct symtab_node;
/* How the reference is done. */
diff --git a/gcc/ipa-reference.c b/gcc/ipa-reference.c
index 7b2614ffa28..78737aa9e3f 100644
--- a/gcc/ipa-reference.c
+++ b/gcc/ipa-reference.c
@@ -1069,7 +1069,7 @@ ipa_reference_read_optimization_summary (void)
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data,
LTO_section_ipa_reference,
&data, &len);
diff --git a/gcc/ipa-split.c b/gcc/ipa-split.c
index 44ae2c7d077..86b26d3cef3 100644
--- a/gcc/ipa-split.c
+++ b/gcc/ipa-split.c
@@ -146,7 +146,7 @@ public:
/* Best split point found. */
-struct split_point best_split_point;
+class split_point best_split_point;
/* Set of basic blocks that are not allowed to dominate a split point. */
@@ -193,7 +193,7 @@ test_nonssa_use (gimple *, tree t, tree, void *data)
/* Dump split point CURRENT. */
static void
-dump_split_point (FILE * file, struct split_point *current)
+dump_split_point (FILE * file, class split_point *current)
{
fprintf (file,
"Split point at BB %i\n"
@@ -212,7 +212,7 @@ dump_split_point (FILE * file, struct split_point *current)
Parameters are the same as for consider_split. */
static bool
-verify_non_ssa_vars (struct split_point *current, bitmap non_ssa_vars,
+verify_non_ssa_vars (class split_point *current, bitmap non_ssa_vars,
basic_block return_bb)
{
bitmap seen = BITMAP_ALLOC (NULL);
@@ -406,7 +406,7 @@ dominated_by_forbidden (basic_block bb)
/* For given split point CURRENT and return block RETURN_BB return 1
if ssa name VAL is set by split part and 0 otherwise. */
static bool
-split_part_set_ssa_name_p (tree val, struct split_point *current,
+split_part_set_ssa_name_p (tree val, class split_point *current,
basic_block return_bb)
{
if (TREE_CODE (val) != SSA_NAME)
@@ -423,7 +423,7 @@ split_part_set_ssa_name_p (tree val, struct split_point *current,
See if we can split function here. */
static void
-consider_split (struct split_point *current, bitmap non_ssa_vars,
+consider_split (class split_point *current, bitmap non_ssa_vars,
basic_block return_bb)
{
tree parm;
@@ -1035,7 +1035,7 @@ find_split_points (basic_block return_bb, sreal overall_time, int overall_size)
stack_entry first;
vec<stack_entry> stack = vNULL;
basic_block bb;
- struct split_point current;
+ class split_point current;
current.header_time = overall_time;
current.header_size = overall_size;
@@ -1181,7 +1181,7 @@ find_split_points (basic_block return_bb, sreal overall_time, int overall_size)
/* Split function at SPLIT_POINT. */
static void
-split_function (basic_block return_bb, struct split_point *split_point,
+split_function (basic_block return_bb, class split_point *split_point,
bool add_tsan_func_exit)
{
vec<tree> args_to_pass = vNULL;
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 83caa3a8ed8..c7457fa4431 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -253,10 +253,10 @@ finish_loop_tree_nodes (void)
loop designating the whole function when CFG loops are not
built. */
static void
-add_loop_to_tree (struct loop *loop)
+add_loop_to_tree (class loop *loop)
{
int loop_num;
- struct loop *parent;
+ class loop *parent;
ira_loop_tree_node_t loop_node, parent_node;
/* We cannot use loop node access macros here because of potential
@@ -331,7 +331,7 @@ static void
form_loop_tree (void)
{
basic_block bb;
- struct loop *parent;
+ class loop *parent;
ira_loop_tree_node_t bb_node, loop_node;
/* We cannot use loop/bb node access macros because of potential
@@ -2168,7 +2168,7 @@ low_pressure_loop_node_p (ira_loop_tree_node_t node)
form a region from such a loop if the target uses stack registers
because reg-stack.c cannot deal with such edges. */
static bool
-loop_with_complex_edge_p (struct loop *loop)
+loop_with_complex_edge_p (class loop *loop)
{
int i;
edge_iterator ei;
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 8a90ae1b4e6..99236994d64 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -4557,7 +4557,7 @@ ira_reuse_stack_slot (int regno, poly_uint64 inherent_size,
ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
rtx x;
bitmap_iterator bi;
- struct ira_spilled_reg_stack_slot *slot = NULL;
+ class ira_spilled_reg_stack_slot *slot = NULL;
ira_assert (! ira_use_lra_p);
@@ -4669,7 +4669,7 @@ ira_reuse_stack_slot (int regno, poly_uint64 inherent_size,
void
ira_mark_new_stack_slot (rtx x, int regno, poly_uint64 total_size)
{
- struct ira_spilled_reg_stack_slot *slot;
+ class ira_spilled_reg_stack_slot *slot;
int slot_num;
ira_allocno_t allocno;
diff --git a/gcc/ira-int.h b/gcc/ira-int.h
index 08848f0e817..92b7dfb1119 100644
--- a/gcc/ira-int.h
+++ b/gcc/ira-int.h
@@ -82,7 +82,7 @@ struct ira_loop_tree_node
/* The node represents basic block if children == NULL. */
basic_block bb; /* NULL for loop. */
/* NULL for BB or for loop tree root if we did not build CFG loop tree. */
- struct loop *loop;
+ class loop *loop;
/* NEXT/SUBLOOP_NEXT is the next node/loop-node of the same parent.
SUBLOOP_NEXT is always NULL for BBs. */
ira_loop_tree_node_t subloop_next, next;
@@ -613,7 +613,7 @@ extern int ira_spilled_reg_stack_slots_num;
/* The following array contains info about spilled pseudo-register
stack slots used in current function so far. */
-extern struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
+extern class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, cost of the
allocnos assigned to hard-registers, cost of the allocnos assigned
@@ -909,9 +909,9 @@ public:
bool x_ira_prohibited_mode_move_regs_initialized_p;
};
-extern struct target_ira_int default_target_ira_int;
+extern class target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
-extern struct target_ira_int *this_target_ira_int;
+extern class target_ira_int *this_target_ira_int;
#else
#define this_target_ira_int (&default_target_ira_int)
#endif
diff --git a/gcc/ira.c b/gcc/ira.c
index 214fdffc5fe..c58daba6e79 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -394,10 +394,10 @@ along with GCC; see the file COPYING3. If not see
#include "print-rtl.h"
struct target_ira default_target_ira;
-struct target_ira_int default_target_ira_int;
+class target_ira_int default_target_ira_int;
#if SWITCHABLE_TARGET
struct target_ira *this_target_ira = &default_target_ira;
-struct target_ira_int *this_target_ira_int = &default_target_ira_int;
+class target_ira_int *this_target_ira_int = &default_target_ira_int;
#endif
/* A modified value of flag `-fira-verbose' used internally. */
@@ -411,7 +411,7 @@ int ira_spilled_reg_stack_slots_num;
/* The following array contains info about spilled pseudo-register
stack slots used in current function so far. */
-struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
+class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
/* Correspondingly overall cost of the allocation, overall cost before
reload, cost of the allocnos assigned to hard-registers, cost of
@@ -4061,7 +4061,7 @@ setup_reg_equiv (void)
/* Print chain C to FILE. */
static void
-print_insn_chain (FILE *file, struct insn_chain *c)
+print_insn_chain (FILE *file, class insn_chain *c)
{
fprintf (file, "insn=%d, ", INSN_UID (c->insn));
bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
@@ -4073,7 +4073,7 @@ print_insn_chain (FILE *file, struct insn_chain *c)
static void
print_insn_chains (FILE *file)
{
- struct insn_chain *c;
+ class insn_chain *c;
for (c = reload_insn_chain; c ; c = c->next)
print_insn_chain (file, c);
}
@@ -4134,10 +4134,10 @@ static void
build_insn_chain (void)
{
unsigned int i;
- struct insn_chain **p = &reload_insn_chain;
+ class insn_chain **p = &reload_insn_chain;
basic_block bb;
- struct insn_chain *c = NULL;
- struct insn_chain *next = NULL;
+ class insn_chain *c = NULL;
+ class insn_chain *next = NULL;
auto_bitmap live_relevant_regs;
auto_bitmap elim_regset;
/* live_subregs is a vector used to keep accurate information about
@@ -5467,11 +5467,11 @@ ira (FILE *f)
{
ira_spilled_reg_stack_slots_num = 0;
ira_spilled_reg_stack_slots
- = ((struct ira_spilled_reg_stack_slot *)
+ = ((class ira_spilled_reg_stack_slot *)
ira_allocate (max_regno
- * sizeof (struct ira_spilled_reg_stack_slot)));
+ * sizeof (class ira_spilled_reg_stack_slot)));
memset ((void *)ira_spilled_reg_stack_slots, 0,
- max_regno * sizeof (struct ira_spilled_reg_stack_slot));
+ max_regno * sizeof (class ira_spilled_reg_stack_slot));
}
}
allocate_initial_values ();
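The sizeof and cast changes in the ira () hunk just above are purely cosmetic: the class-key is not part of the type, so the struct and class spellings of an elaborated-type-specifier name the same type, with the same size and layout. A small sketch under that assumption, with a hypothetical type name:

/* Either key names the same type, so the sizes are equal.  */
class example_slot { public: int regno; };
static_assert (sizeof (struct example_slot) == sizeof (class example_slot),
               "class-key does not affect the type");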
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index 89714be76c4..0efe7b449ff 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -263,7 +263,7 @@ doloop_condition_get (rtx_insn *doloop_pat)
describes the number of iterations of the loop. */
static bool
-doloop_valid_p (struct loop *loop, struct niter_desc *desc)
+doloop_valid_p (class loop *loop, class niter_desc *desc)
{
basic_block *body = get_loop_body (loop), bb;
rtx_insn *insn;
@@ -405,7 +405,7 @@ add_test (rtx cond, edge *e, basic_block dest)
DOLOOP_SEQ. COUNT is the number of iterations of the LOOP. */
static void
-doloop_modify (struct loop *loop, struct niter_desc *desc,
+doloop_modify (class loop *loop, class niter_desc *desc,
rtx_insn *doloop_seq, rtx condition, rtx count)
{
rtx counter_reg;
@@ -603,7 +603,7 @@ record_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
modified. */
static bool
-doloop_optimize (struct loop *loop)
+doloop_optimize (class loop *loop)
{
scalar_int_mode mode;
rtx doloop_reg;
@@ -614,7 +614,7 @@ doloop_optimize (struct loop *loop)
unsigned level;
HOST_WIDE_INT est_niter;
int max_cost;
- struct niter_desc *desc;
+ class niter_desc *desc;
unsigned word_mode_size;
unsigned HOST_WIDE_INT word_mode_max;
int entered_at_top;
@@ -754,7 +754,7 @@ doloop_optimize (struct loop *loop)
void
doloop_optimize_loops (void)
{
- struct loop *loop;
+ class loop *loop;
if (optimize == 1)
{
diff --git a/gcc/loop-init.c b/gcc/loop-init.c
index bdfa3856a94..4b3bbccd635 100644
--- a/gcc/loop-init.c
+++ b/gcc/loop-init.c
@@ -135,7 +135,7 @@ loop_optimizer_init (unsigned flags)
void
loop_optimizer_finalize (struct function *fn)
{
- struct loop *loop;
+ class loop *loop;
basic_block bb;
timevar_push (TV_LOOP_FINI);
@@ -194,7 +194,7 @@ fix_loop_structure (bitmap changed_bbs)
{
basic_block bb;
int record_exits = 0;
- struct loop *loop;
+ class loop *loop;
unsigned old_nloops, i;
timevar_push (TV_LOOP_INIT);
@@ -237,7 +237,7 @@ fix_loop_structure (bitmap changed_bbs)
while (loop->inner)
{
- struct loop *ploop = loop->inner;
+ class loop *ploop = loop->inner;
flow_loop_tree_node_remove (ploop);
flow_loop_tree_node_add (loop_outer (loop), ploop);
}
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index 4fc73d4917b..644ecfc6fbb 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -61,7 +61,7 @@ along with GCC; see the file COPYING3. If not see
class loop_data
{
public:
- struct loop *outermost_exit; /* The outermost exit of the loop. */
+ class loop *outermost_exit; /* The outermost exit of the loop. */
bool has_call; /* True if the loop contains a call. */
/* Maximal register pressure inside loop for given register class
(defined only for the pressure classes). */
@@ -71,7 +71,7 @@ public:
bitmap_head regs_live;
};
-#define LOOP_DATA(LOOP) ((struct loop_data *) (LOOP)->aux)
+#define LOOP_DATA(LOOP) ((class loop_data *) (LOOP)->aux)
/* The description of a use. */
@@ -144,7 +144,7 @@ struct invariant
};
/* Currently processed loop. */
-static struct loop *curr_loop;
+static class loop *curr_loop;
/* Table of invariants indexed by the df_ref uid field. */
@@ -558,7 +558,7 @@ merge_identical_invariants (void)
get_loop_body_in_dom_order. */
static void
-compute_always_reached (struct loop *loop, basic_block *body,
+compute_always_reached (class loop *loop, basic_block *body,
bitmap may_exit, bitmap always_reached)
{
unsigned i;
@@ -578,13 +578,13 @@ compute_always_reached (struct loop *loop, basic_block *body,
additionally mark blocks that may exit due to a call. */
static void
-find_exits (struct loop *loop, basic_block *body,
+find_exits (class loop *loop, basic_block *body,
bitmap may_exit, bitmap has_exit)
{
unsigned i;
edge_iterator ei;
edge e;
- struct loop *outermost_exit = loop, *aexit;
+ class loop *outermost_exit = loop, *aexit;
bool has_call = false;
rtx_insn *insn;
@@ -645,7 +645,7 @@ find_exits (struct loop *loop, basic_block *body,
if (loop->aux == NULL)
{
- loop->aux = xcalloc (1, sizeof (struct loop_data));
+ loop->aux = xcalloc (1, sizeof (class loop_data));
bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
}
@@ -673,7 +673,7 @@ may_assign_reg_p (rtx x)
BODY. */
static void
-find_defs (struct loop *loop)
+find_defs (class loop *loop)
{
if (dump_file)
{
@@ -1210,7 +1210,7 @@ find_invariants_bb (basic_block bb, bool always_reached, bool always_executed)
ends due to a function call. */
static void
-find_invariants_body (struct loop *loop, basic_block *body,
+find_invariants_body (class loop *loop, basic_block *body,
bitmap always_reached, bitmap always_executed)
{
unsigned i;
@@ -1224,7 +1224,7 @@ find_invariants_body (struct loop *loop, basic_block *body,
/* Finds invariants in LOOP. */
static void
-find_invariants (struct loop *loop)
+find_invariants (class loop *loop)
{
auto_bitmap may_exit;
auto_bitmap always_reached;
@@ -1687,7 +1687,7 @@ replace_uses (struct invariant *inv, rtx reg, bool in_group)
the block preceding its header. */
static bool
-can_move_invariant_reg (struct loop *loop, struct invariant *inv, rtx reg)
+can_move_invariant_reg (class loop *loop, struct invariant *inv, rtx reg)
{
df_ref def, use;
unsigned int dest_regno, defs_in_loop_count = 0;
@@ -1760,7 +1760,7 @@ can_move_invariant_reg (struct loop *loop, struct invariant *inv, rtx reg)
otherwise. */
static bool
-move_invariant_reg (struct loop *loop, unsigned invno)
+move_invariant_reg (class loop *loop, unsigned invno)
{
struct invariant *inv = invariants[invno];
struct invariant *repr = invariants[inv->eqto];
@@ -1866,7 +1866,7 @@ fail:
in TEMPORARY_REGS. */
static void
-move_invariants (struct loop *loop)
+move_invariants (class loop *loop)
{
struct invariant *inv;
unsigned i;
@@ -1939,7 +1939,7 @@ free_inv_motion_data (void)
/* Move the invariants out of the LOOP. */
static void
-move_single_loop_invariants (struct loop *loop)
+move_single_loop_invariants (class loop *loop)
{
init_inv_motion_data ();
@@ -1954,9 +1954,9 @@ move_single_loop_invariants (struct loop *loop)
/* Releases the auxiliary data for LOOP. */
static void
-free_loop_data (struct loop *loop)
+free_loop_data (class loop *loop)
{
- struct loop_data *data = LOOP_DATA (loop);
+ class loop_data *data = LOOP_DATA (loop);
if (!data)
return;
@@ -2035,7 +2035,7 @@ change_pressure (int regno, bool incr_p)
static void
mark_regno_live (int regno)
{
- struct loop *loop;
+ class loop *loop;
for (loop = curr_loop;
loop != current_loops->tree_root;
@@ -2104,7 +2104,7 @@ mark_ref_regs (rtx x)
code = GET_CODE (x);
if (code == REG)
{
- struct loop *loop;
+ class loop *loop;
for (loop = curr_loop;
loop != current_loops->tree_root;
@@ -2136,12 +2136,12 @@ calculate_loop_reg_pressure (void)
basic_block bb;
rtx_insn *insn;
rtx link;
- struct loop *loop, *parent;
+ class loop *loop, *parent;
FOR_EACH_LOOP (loop, 0)
if (loop->aux == NULL)
{
- loop->aux = xcalloc (1, sizeof (struct loop_data));
+ loop->aux = xcalloc (1, sizeof (class loop_data));
bitmap_initialize (&LOOP_DATA (loop)->regs_ref, &reg_obstack);
bitmap_initialize (&LOOP_DATA (loop)->regs_live, &reg_obstack);
}
@@ -2253,7 +2253,7 @@ calculate_loop_reg_pressure (void)
void
move_loop_invariants (void)
{
- struct loop *loop;
+ class loop *loop;
if (optimize == 1)
df_live_add_problem ();
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index f078a68d8bf..2274cc3075b 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -89,7 +89,7 @@ class biv_entry
{
public:
unsigned regno; /* The register of the biv. */
- struct rtx_iv iv; /* Value of the biv. */
+ class rtx_iv iv; /* Value of the biv. */
};
static bool clean_slate = true;
@@ -97,7 +97,7 @@ static bool clean_slate = true;
static unsigned int iv_ref_table_size = 0;
/* Table of rtx_ivs indexed by the df_ref uid field. */
-static struct rtx_iv ** iv_ref_table;
+static class rtx_iv ** iv_ref_table;
/* Induction variable stored at the reference. */
#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
@@ -105,7 +105,7 @@ static struct rtx_iv ** iv_ref_table;
/* The current loop. */
-static struct loop *current_loop;
+static class loop *current_loop;
/* Hashtable helper. */
@@ -136,7 +136,7 @@ biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r)
static hash_table<biv_entry_hasher> *bivs;
-static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, struct rtx_iv *);
+static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
/* Return the RTX code corresponding to the IV extend code EXTEND. */
static inline enum rtx_code
@@ -156,9 +156,9 @@ iv_extend_to_rtx_code (enum iv_extend_code extend)
/* Dumps information about IV to FILE. */
-extern void dump_iv_info (FILE *, struct rtx_iv *);
+extern void dump_iv_info (FILE *, class rtx_iv *);
void
-dump_iv_info (FILE *file, struct rtx_iv *iv)
+dump_iv_info (FILE *file, class rtx_iv *iv)
{
if (!iv->base)
{
@@ -204,9 +204,9 @@ check_iv_ref_table_size (void)
if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
{
unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
- iv_ref_table = XRESIZEVEC (struct rtx_iv *, iv_ref_table, new_size);
+ iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size);
memset (&iv_ref_table[iv_ref_table_size], 0,
- (new_size - iv_ref_table_size) * sizeof (struct rtx_iv *));
+ (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
iv_ref_table_size = new_size;
}
}
@@ -245,7 +245,7 @@ static void
clear_iv_info (void)
{
unsigned i, n_defs = DF_DEFS_TABLE_SIZE ();
- struct rtx_iv *iv;
+ class rtx_iv *iv;
check_iv_ref_table_size ();
for (i = 0; i < n_defs; i++)
@@ -265,7 +265,7 @@ clear_iv_info (void)
/* Prepare the data for an induction variable analysis of a LOOP. */
void
-iv_analysis_loop_init (struct loop *loop)
+iv_analysis_loop_init (class loop *loop)
{
current_loop = loop;
@@ -303,7 +303,7 @@ latch_dominating_def (rtx reg, df_ref *def)
{
df_ref single_rd = NULL, adef;
unsigned regno = REGNO (reg);
- struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
+ class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);
for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
{
@@ -386,7 +386,7 @@ iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
consistency with other iv manipulation functions that may fail). */
static bool
-iv_constant (struct rtx_iv *iv, scalar_int_mode mode, rtx cst)
+iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst)
{
iv->mode = mode;
iv->base = cst;
@@ -403,7 +403,7 @@ iv_constant (struct rtx_iv *iv, scalar_int_mode mode, rtx cst)
/* Evaluates application of subreg to MODE on IV. */
static bool
-iv_subreg (struct rtx_iv *iv, scalar_int_mode mode)
+iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
@@ -445,7 +445,7 @@ iv_subreg (struct rtx_iv *iv, scalar_int_mode mode)
/* Evaluates application of EXTEND to MODE on IV. */
static bool
-iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
+iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
{
/* If iv is invariant, just calculate the new value. */
if (iv->step == const0_rtx
@@ -483,7 +483,7 @@ iv_extend (struct rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
/* Evaluates negation of IV. */
static bool
-iv_neg (struct rtx_iv *iv)
+iv_neg (class rtx_iv *iv)
{
if (iv->extend == IV_UNKNOWN_EXTEND)
{
@@ -506,7 +506,7 @@ iv_neg (struct rtx_iv *iv)
/* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */
static bool
-iv_add (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code op)
+iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
{
scalar_int_mode mode;
rtx arg;
@@ -576,7 +576,7 @@ iv_add (struct rtx_iv *iv0, struct rtx_iv *iv1, enum rtx_code op)
/* Evaluates multiplication of IV by constant CST. */
static bool
-iv_mult (struct rtx_iv *iv, rtx mby)
+iv_mult (class rtx_iv *iv, rtx mby)
{
scalar_int_mode mode = iv->extend_mode;
@@ -601,7 +601,7 @@ iv_mult (struct rtx_iv *iv, rtx mby)
/* Evaluates shift of IV by constant CST. */
static bool
-iv_shift (struct rtx_iv *iv, rtx mby)
+iv_shift (class rtx_iv *iv, rtx mby)
{
scalar_int_mode mode = iv->extend_mode;
@@ -811,9 +811,9 @@ get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg,
/* Records information that DEF is induction variable IV. */
static void
-record_iv (df_ref def, struct rtx_iv *iv)
+record_iv (df_ref def, class rtx_iv *iv)
{
- struct rtx_iv *recorded_iv = XNEW (struct rtx_iv);
+ class rtx_iv *recorded_iv = XNEW (class rtx_iv);
*recorded_iv = *iv;
check_iv_ref_table_size ();
@@ -824,9 +824,9 @@ record_iv (df_ref def, struct rtx_iv *iv)
IV and return true. Otherwise return false. */
static bool
-analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
+analyzed_for_bivness_p (rtx def, class rtx_iv *iv)
{
- struct biv_entry *biv = bivs->find_with_hash (def, REGNO (def));
+ class biv_entry *biv = bivs->find_with_hash (def, REGNO (def));
if (!biv)
return false;
@@ -836,9 +836,9 @@ analyzed_for_bivness_p (rtx def, struct rtx_iv *iv)
}
static void
-record_biv (rtx def, struct rtx_iv *iv)
+record_biv (rtx def, class rtx_iv *iv)
{
- struct biv_entry *biv = XNEW (struct biv_entry);
+ class biv_entry *biv = XNEW (class biv_entry);
biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT);
biv->regno = REGNO (def);
@@ -851,7 +851,7 @@ record_biv (rtx def, struct rtx_iv *iv)
to *IV. OUTER_MODE is the mode of DEF. */
static bool
-iv_analyze_biv (scalar_int_mode outer_mode, rtx def, struct rtx_iv *iv)
+iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
{
rtx inner_step, outer_step;
scalar_int_mode inner_mode;
@@ -929,11 +929,11 @@ iv_analyze_biv (scalar_int_mode outer_mode, rtx def, struct rtx_iv *iv)
bool
iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
- struct rtx_iv *iv)
+ class rtx_iv *iv)
{
rtx mby = NULL_RTX;
rtx op0 = NULL_RTX, op1 = NULL_RTX;
- struct rtx_iv iv0, iv1;
+ class rtx_iv iv0, iv1;
enum rtx_code code = GET_CODE (rhs);
scalar_int_mode omode = mode;
@@ -1040,7 +1040,7 @@ iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
/* Analyzes iv DEF and stores the result to *IV. */
static bool
-iv_analyze_def (df_ref def, struct rtx_iv *iv)
+iv_analyze_def (df_ref def, class rtx_iv *iv)
{
rtx_insn *insn = DF_REF_INSN (def);
rtx reg = DF_REF_REG (def);
@@ -1104,7 +1104,7 @@ iv_analyze_def (df_ref def, struct rtx_iv *iv)
mode of OP. */
static bool
-iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, struct rtx_iv *iv)
+iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv)
{
df_ref def = NULL;
enum iv_grd_result res;
@@ -1165,7 +1165,7 @@ iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, struct rtx_iv *iv)
mode of VAL. */
bool
-iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, struct rtx_iv *iv)
+iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv)
{
rtx reg;
@@ -1190,7 +1190,7 @@ iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, struct rtx_iv *iv)
/* Analyzes definition of DEF in INSN and stores the result to IV. */
bool
-iv_analyze_result (rtx_insn *insn, rtx def, struct rtx_iv *iv)
+iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv)
{
df_ref adef;
@@ -1210,7 +1210,7 @@ iv_analyze_result (rtx_insn *insn, rtx def, struct rtx_iv *iv)
bool
biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
{
- struct rtx_iv iv;
+ class rtx_iv iv;
df_ref def, last_def;
if (!simple_reg_p (reg))
@@ -1232,7 +1232,7 @@ biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
/* Calculates value of IV at ITERATION-th iteration. */
rtx
-get_iv_value (struct rtx_iv *iv, rtx iteration)
+get_iv_value (class rtx_iv *iv, rtx iteration)
{
rtx val;
@@ -1851,7 +1851,7 @@ eliminate_implied_conditions (enum rtx_code op, rtx *head, rtx tail)
is a list, its elements are assumed to be combined using OP. */
static void
-simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
+simplify_using_initial_values (class loop *loop, enum rtx_code op, rtx *expr)
{
bool expression_valid;
rtx head, tail, last_valid_expr;
@@ -2072,8 +2072,8 @@ simplify_using_initial_values (struct loop *loop, enum rtx_code op, rtx *expr)
is SIGNED_P to DESC. */
static void
-shorten_into_mode (struct rtx_iv *iv, scalar_int_mode mode,
- enum rtx_code cond, bool signed_p, struct niter_desc *desc)
+shorten_into_mode (class rtx_iv *iv, scalar_int_mode mode,
+ enum rtx_code cond, bool signed_p, class niter_desc *desc)
{
rtx mmin, mmax, cond_over, cond_under;
@@ -2131,8 +2131,8 @@ shorten_into_mode (struct rtx_iv *iv, scalar_int_mode mode,
some assumptions to DESC). */
static bool
-canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
- enum rtx_code cond, struct niter_desc *desc)
+canonicalize_iv_subregs (class rtx_iv *iv0, class rtx_iv *iv1,
+ enum rtx_code cond, class niter_desc *desc)
{
scalar_int_mode comp_mode;
bool signed_p;
@@ -2247,7 +2247,7 @@ canonicalize_iv_subregs (struct rtx_iv *iv0, struct rtx_iv *iv1,
expression for the number of iterations, before we tried to simplify it. */
static uint64_t
-determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
+determine_max_iter (class loop *loop, class niter_desc *desc, rtx old_niter)
{
rtx niter = desc->niter_expr;
rtx mmin, mmax, cmp;
@@ -2305,11 +2305,11 @@ determine_max_iter (struct loop *loop, struct niter_desc *desc, rtx old_niter)
(basically its rtl version), complicated by things like subregs. */
static void
-iv_number_of_iterations (struct loop *loop, rtx_insn *insn, rtx condition,
- struct niter_desc *desc)
+iv_number_of_iterations (class loop *loop, rtx_insn *insn, rtx condition,
+ class niter_desc *desc)
{
rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1;
- struct rtx_iv iv0, iv1;
+ class rtx_iv iv0, iv1;
rtx assumption, may_not_xform;
enum rtx_code cond;
machine_mode nonvoid_mode;
@@ -2867,7 +2867,7 @@ fail:
into DESC. */
static void
-check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc)
+check_simple_exit (class loop *loop, edge e, class niter_desc *desc)
{
basic_block exit_bb;
rtx condition;
@@ -2915,12 +2915,12 @@ check_simple_exit (struct loop *loop, edge e, struct niter_desc *desc)
/* Finds a simple exit of LOOP and stores its description into DESC. */
void
-find_simple_exit (struct loop *loop, struct niter_desc *desc)
+find_simple_exit (class loop *loop, class niter_desc *desc)
{
unsigned i;
basic_block *body;
edge e;
- struct niter_desc act;
+ class niter_desc act;
bool any = false;
edge_iterator ei;
@@ -3018,10 +3018,10 @@ find_simple_exit (struct loop *loop, struct niter_desc *desc)
/* Creates a simple loop description of LOOP if it was not computed
already. */
-struct niter_desc *
-get_simple_loop_desc (struct loop *loop)
+class niter_desc *
+get_simple_loop_desc (class loop *loop)
{
- struct niter_desc *desc = simple_loop_desc (loop);
+ class niter_desc *desc = simple_loop_desc (loop);
if (desc)
return desc;
@@ -3038,9 +3038,9 @@ get_simple_loop_desc (struct loop *loop)
/* Releases simple loop description for LOOP. */
void
-free_simple_loop_desc (struct loop *loop)
+free_simple_loop_desc (class loop *loop)
{
- struct niter_desc *desc = simple_loop_desc (loop);
+ class niter_desc *desc = simple_loop_desc (loop);
if (!desc)
return;
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index 1b4c73b61c8..63fccd23fae 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -163,19 +163,19 @@ struct opt_info
basic_block loop_preheader; /* The loop preheader basic block. */
};
-static void decide_unroll_stupid (struct loop *, int);
-static void decide_unroll_constant_iterations (struct loop *, int);
-static void decide_unroll_runtime_iterations (struct loop *, int);
-static void unroll_loop_stupid (struct loop *);
+static void decide_unroll_stupid (class loop *, int);
+static void decide_unroll_constant_iterations (class loop *, int);
+static void decide_unroll_runtime_iterations (class loop *, int);
+static void unroll_loop_stupid (class loop *);
static void decide_unrolling (int);
-static void unroll_loop_constant_iterations (struct loop *);
-static void unroll_loop_runtime_iterations (struct loop *);
-static struct opt_info *analyze_insns_in_loop (struct loop *);
+static void unroll_loop_constant_iterations (class loop *);
+static void unroll_loop_runtime_iterations (class loop *);
+static struct opt_info *analyze_insns_in_loop (class loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
-static struct var_to_expand *analyze_insn_to_expand_var (struct loop*, rtx_insn *);
-static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
+static struct var_to_expand *analyze_insn_to_expand_var (class loop*, rtx_insn *);
+static bool referenced_in_one_insn_in_loop_p (class loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx_insn *);
static void expand_var_during_unrolling (struct var_to_expand *, rtx_insn *);
static void insert_var_expansion_initialization (struct var_to_expand *,
@@ -189,7 +189,7 @@ static rtx get_expansion (struct var_to_expand *);
appropriate given the dump or -fopt-info settings. */
static void
-report_unroll (struct loop *loop, dump_location_t locus)
+report_unroll (class loop *loop, dump_location_t locus)
{
dump_flags_t report_flags = MSG_OPTIMIZED_LOCATIONS | TDF_DETAILS;
@@ -215,7 +215,7 @@ report_unroll (struct loop *loop, dump_location_t locus)
static void
decide_unrolling (int flags)
{
- struct loop *loop;
+ class loop *loop;
/* Scan the loops, inner ones first. */
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
@@ -279,7 +279,7 @@ decide_unrolling (int flags)
void
unroll_loops (int flags)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
/* Now decide rest of unrolling. */
@@ -322,9 +322,9 @@ unroll_loops (int flags)
/* Check whether exit of the LOOP is at the end of loop body. */
static bool
-loop_exit_at_end_p (struct loop *loop)
+loop_exit_at_end_p (class loop *loop)
{
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
rtx_insn *insn;
/* We should never have conditional in latch block. */
@@ -347,10 +347,10 @@ loop_exit_at_end_p (struct loop *loop)
and how much. */
static void
-decide_unroll_constant_iterations (struct loop *loop, int flags)
+decide_unroll_constant_iterations (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
/* If we were not asked to unroll this loop, just return back silently. */
@@ -480,14 +480,14 @@ decide_unroll_constant_iterations (struct loop *loop, int flags)
}
*/
static void
-unroll_loop_constant_iterations (struct loop *loop)
+unroll_loop_constant_iterations (class loop *loop)
{
unsigned HOST_WIDE_INT niter;
unsigned exit_mod;
unsigned i;
edge e;
unsigned max_unroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
@@ -667,10 +667,10 @@ unroll_loop_constant_iterations (struct loop *loop)
/* Decide whether to unroll LOOP iterating runtime computable number of times
and how much. */
static void
-decide_unroll_runtime_iterations (struct loop *loop, int flags)
+decide_unroll_runtime_iterations (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
/* If we were not asked to unroll this loop, just return back silently. */
@@ -881,7 +881,7 @@ compare_and_jump_seq (rtx op0, rtx op1, enum rtx_code comp,
}
*/
static void
-unroll_loop_runtime_iterations (struct loop *loop)
+unroll_loop_runtime_iterations (class loop *loop)
{
rtx old_niter, niter, tmp;
rtx_insn *init_code, *branch_code;
@@ -894,7 +894,7 @@ unroll_loop_runtime_iterations (struct loop *loop)
edge e;
bool extra_zero_check, last_may_exit;
unsigned max_unroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
bool exit_at_end = loop_exit_at_end_p (loop);
struct opt_info *opt_info = NULL;
bool ok;
@@ -1152,10 +1152,10 @@ unroll_loop_runtime_iterations (struct loop *loop)
/* Decide whether to unroll LOOP stupidly and how much. */
static void
-decide_unroll_stupid (struct loop *loop, int flags)
+decide_unroll_stupid (class loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
- struct niter_desc *desc;
+ class niter_desc *desc;
widest_int iterations;
/* If we were not asked to unroll this loop, just return back silently. */
@@ -1250,10 +1250,10 @@ decide_unroll_stupid (struct loop *loop, int flags)
}
*/
static void
-unroll_loop_stupid (struct loop *loop)
+unroll_loop_stupid (class loop *loop)
{
unsigned nunroll = loop->lpt_decision.times;
- struct niter_desc *desc = get_simple_loop_desc (loop);
+ class niter_desc *desc = get_simple_loop_desc (loop);
struct opt_info *opt_info = NULL;
bool ok;
@@ -1301,7 +1301,7 @@ unroll_loop_stupid (struct loop *loop)
variable. */
static bool
-referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
+referenced_in_one_insn_in_loop_p (class loop *loop, rtx reg,
int *debug_uses)
{
basic_block *body, bb;
@@ -1329,7 +1329,7 @@ referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
/* Reset the DEBUG_USES debug insns in LOOP that reference REG. */
static void
-reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
+reset_debug_uses_in_loop (class loop *loop, rtx reg, int debug_uses)
{
basic_block *body, bb;
unsigned i;
@@ -1378,7 +1378,7 @@ reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
*/
static struct var_to_expand *
-analyze_insn_to_expand_var (struct loop *loop, rtx_insn *insn)
+analyze_insn_to_expand_var (class loop *loop, rtx_insn *insn)
{
rtx set, dest, src;
struct var_to_expand *ves;
@@ -1519,7 +1519,7 @@ static struct iv_to_split *
analyze_iv_to_split_insn (rtx_insn *insn)
{
rtx set, dest;
- struct rtx_iv iv;
+ class rtx_iv iv;
struct iv_to_split *ivts;
scalar_int_mode mode;
bool ok;
@@ -1571,7 +1571,7 @@ analyze_iv_to_split_insn (rtx_insn *insn)
is undefined for the return value. */
static struct opt_info *
-analyze_insns_in_loop (struct loop *loop)
+analyze_insns_in_loop (class loop *loop)
{
basic_block *body, bb;
unsigned i;
diff --git a/gcc/lra-eliminations.c b/gcc/lra-eliminations.c
index e583bca4261..943da888848 100644
--- a/gcc/lra-eliminations.c
+++ b/gcc/lra-eliminations.c
@@ -100,7 +100,7 @@ public:
of eliminating a register in favor of another. If there is more
than one way of eliminating a particular register, the most
preferred should be specified first. */
-static struct lra_elim_table *reg_eliminate = 0;
+static class lra_elim_table *reg_eliminate = 0;
/* This is an intermediate structure to initialize the table. It has
exactly the members provided by ELIMINABLE_REGS. */
@@ -118,7 +118,7 @@ static const struct elim_table_1
static void
print_elim_table (FILE *f)
{
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
for (ep = reg_eliminate; ep < &reg_eliminate[NUM_ELIMINABLE_REGS]; ep++)
{
@@ -142,7 +142,7 @@ lra_debug_elim_table (void)
VALUE. Setup FRAME_POINTER_NEEDED if elimination from frame
pointer to stack pointer is not possible anymore. */
static void
-setup_can_eliminate (struct lra_elim_table *ep, bool value)
+setup_can_eliminate (class lra_elim_table *ep, bool value)
{
ep->can_eliminate = ep->prev_can_eliminate = value;
if (! value
@@ -156,12 +156,12 @@ setup_can_eliminate (struct lra_elim_table *ep, bool value)
or NULL if none. The elimination table may contain more than
one elimination for the same hard register, but this map specifies
the one that we are currently using. */
-static struct lra_elim_table *elimination_map[FIRST_PSEUDO_REGISTER];
+static class lra_elim_table *elimination_map[FIRST_PSEUDO_REGISTER];
/* When an eliminable hard register becomes not eliminable, we use the
following special structure to restore original offsets for the
register. */
-static struct lra_elim_table self_elim_table;
+static class lra_elim_table self_elim_table;
/* Offsets should be used to restore original offsets for eliminable
hard register which just became not eliminable. Zero,
@@ -177,7 +177,7 @@ static void
setup_elimination_map (void)
{
int i;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
elimination_map[i] = NULL;
@@ -242,7 +242,7 @@ form_sum (rtx x, rtx y)
int
lra_get_elimination_hard_regno (int hard_regno)
{
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
if (hard_regno < 0 || hard_regno >= FIRST_PSEUDO_REGISTER)
return hard_regno;
@@ -253,11 +253,11 @@ lra_get_elimination_hard_regno (int hard_regno)
/* Return elimination which will be used for hard reg REG, NULL
otherwise. */
-static struct lra_elim_table *
+static class lra_elim_table *
get_elimination (rtx reg)
{
int hard_regno;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
lra_assert (REG_P (reg));
if ((hard_regno = REGNO (reg)) < 0 || hard_regno >= FIRST_PSEUDO_REGISTER)
@@ -334,7 +334,7 @@ lra_eliminate_regs_1 (rtx_insn *insn, rtx x, machine_mode mem_mode,
poly_int64 update_sp_offset, bool full_p)
{
enum rtx_code code = GET_CODE (x);
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
rtx new_rtx;
int i, j;
const char *fmt;
@@ -731,7 +731,7 @@ static void
mark_not_eliminable (rtx x, machine_mode mem_mode)
{
enum rtx_code code = GET_CODE (x);
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
int i, j;
const char *fmt;
poly_int64 offset = 0;
@@ -901,7 +901,7 @@ eliminate_regs_in_insn (rtx_insn *insn, bool replace_p, bool first_p,
int i;
rtx substed_operand[MAX_RECOG_OPERANDS];
rtx orig_operand[MAX_RECOG_OPERANDS];
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
rtx plus_src, plus_cst_src;
lra_insn_recog_data_t id;
struct lra_static_insn_data *static_id;
@@ -1109,7 +1109,7 @@ static bool
update_reg_eliminate (bitmap insns_with_changed_offsets)
{
bool prev, result;
- struct lra_elim_table *ep, *ep1;
+ class lra_elim_table *ep, *ep1;
HARD_REG_SET temp_hard_reg_set;
targetm.compute_frame_layout ();
@@ -1214,12 +1214,12 @@ update_reg_eliminate (bitmap insns_with_changed_offsets)
static void
init_elim_table (void)
{
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
bool value_p;
const struct elim_table_1 *ep1;
if (!reg_eliminate)
- reg_eliminate = XCNEWVEC (struct lra_elim_table, NUM_ELIMINABLE_REGS);
+ reg_eliminate = XCNEWVEC (class lra_elim_table, NUM_ELIMINABLE_REGS);
memset (self_elim_offsets, 0, sizeof (self_elim_offsets));
/* Initialize member values which will never be changed. */
@@ -1262,7 +1262,7 @@ init_elimination (void)
bool stop_to_sp_elimination_p;
basic_block bb;
rtx_insn *insn;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
init_elim_table ();
FOR_EACH_BB_FN (bb, cfun)
@@ -1296,7 +1296,7 @@ void
lra_eliminate_reg_if_possible (rtx *loc)
{
int regno;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
lra_assert (REG_P (*loc));
if ((regno = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
@@ -1340,7 +1340,7 @@ lra_eliminate (bool final_p, bool first_p)
unsigned int uid;
bitmap_head insns_with_changed_offsets;
bitmap_iterator bi;
- struct lra_elim_table *ep;
+ class lra_elim_table *ep;
gcc_assert (! final_p || ! first_p);
diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index cc47c4daba2..f8db969122a 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -119,7 +119,7 @@ public:
};
/* References to the common info about each register. */
-extern struct lra_reg *lra_reg_info;
+extern class lra_reg *lra_reg_info;
extern HARD_REG_SET hard_regs_spilled_into;
@@ -244,7 +244,7 @@ public:
struct lra_insn_reg *regs;
};
-typedef struct lra_insn_recog_data *lra_insn_recog_data_t;
+typedef class lra_insn_recog_data *lra_insn_recog_data_t;
/* Whether the clobber is used temporarily in LRA. */
#define LRA_TEMP_CLOBBER_P(x) \
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 975e5230a91..96aa7c4717b 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -394,7 +394,7 @@ public:
};
/* Array for all BB data. Indexed by the corresponding BB index. */
-typedef struct bb_data_pseudos *bb_data_t;
+typedef class bb_data_pseudos *bb_data_t;
/* All basic block data are referred through the following array. */
static bb_data_t bb_data;
@@ -470,7 +470,7 @@ initiate_live_solver (void)
{
bitmap_initialize (&all_hard_regs_bitmap, &reg_obstack);
bitmap_set_range (&all_hard_regs_bitmap, 0, FIRST_PSEUDO_REGISTER);
- bb_data = XNEWVEC (struct bb_data_pseudos, last_basic_block_for_fn (cfun));
+ bb_data = XNEWVEC (class bb_data_pseudos, last_basic_block_for_fn (cfun));
bitmap_initialize (&all_blocks, &reg_obstack);
basic_block bb;
diff --git a/gcc/lra-remat.c b/gcc/lra-remat.c
index 51d3543efdc..6a5bf4f3f68 100644
--- a/gcc/lra-remat.c
+++ b/gcc/lra-remat.c
@@ -145,7 +145,7 @@ public:
};
/* Array for all BB data. Indexed by the corresponding BB index. */
-typedef struct remat_bb_data *remat_bb_data_t;
+typedef class remat_bb_data *remat_bb_data_t;
/* Basic blocks for data flow problems -- all blocks except the special
ones. */
@@ -510,7 +510,7 @@ create_remat_bb_data (void)
basic_block bb;
remat_bb_data_t bb_info;
- remat_bb_data = XNEWVEC (struct remat_bb_data,
+ remat_bb_data = XNEWVEC (class remat_bb_data,
last_basic_block_for_fn (cfun));
FOR_ALL_BB_FN (bb, cfun)
{
diff --git a/gcc/lra-spills.c b/gcc/lra-spills.c
index 5db94dc8629..c73d5013167 100644
--- a/gcc/lra-spills.c
+++ b/gcc/lra-spills.c
@@ -122,7 +122,7 @@ public:
/* Array containing info about the stack slots. The array element is
indexed by the stack slot number in the range [0..slots_num). */
-static struct slot *slots;
+static class slot *slots;
/* The number of the stack slots currently existing. */
static int slots_num;
@@ -587,7 +587,7 @@ lra_spill (void)
spill_hard_reg[i] = NULL_RTX;
pseudo_slots[i].mem = NULL_RTX;
}
- slots = XNEWVEC (struct slot, regs_num);
+ slots = XNEWVEC (class slot, regs_num);
/* Sort regnos according to their usage frequencies. */
qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
n = assign_spill_hard_regs (pseudo_regnos, n);
diff --git a/gcc/lra.c b/gcc/lra.c
index 982a3cc630b..af40f43f835 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -972,7 +972,7 @@ lra_set_insn_recog_data (rtx_insn *insn)
/* It might be a new simple insn which is not recognized yet. */
INSN_CODE (insn) = icode = recog_memoized (insn);
}
- data = XNEW (struct lra_insn_recog_data);
+ data = XNEW (class lra_insn_recog_data);
lra_insn_recog_data[uid] = data;
data->insn = insn;
data->used_insn_alternative = LRA_UNKNOWN_ALT;
@@ -1306,7 +1306,7 @@ lra_set_used_insn_alternative_by_uid (int uid, int alt)
/* The size of the following array. */
static int reg_info_size;
/* Common info about each register. */
-struct lra_reg *lra_reg_info;
+class lra_reg *lra_reg_info;
HARD_REG_SET hard_regs_spilled_into;
@@ -1356,7 +1356,7 @@ init_reg_info (void)
last_reg_value = 0;
reg_info_size = max_reg_num () * 3 / 2 + 1;
- lra_reg_info = XNEWVEC (struct lra_reg, reg_info_size);
+ lra_reg_info = XNEWVEC (class lra_reg, reg_info_size);
for (i = 0; i < reg_info_size; i++)
initialize_lra_reg_info_element (i);
copy_vec.truncate (0);
@@ -1385,7 +1385,7 @@ expand_reg_info (void)
if (reg_info_size > max_reg_num ())
return;
reg_info_size = max_reg_num () * 3 / 2 + 1;
- lra_reg_info = XRESIZEVEC (struct lra_reg, lra_reg_info, reg_info_size);
+ lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size);
for (i = old; i < reg_info_size; i++)
initialize_lra_reg_info_element (i);
}
diff --git a/gcc/lto-cgraph.c b/gcc/lto-cgraph.c
index 4dfa2862be3..bc0f0107333 100644
--- a/gcc/lto-cgraph.c
+++ b/gcc/lto-cgraph.c
@@ -1012,7 +1012,7 @@ output_symtab (void)
/* Return identifier encoded in IB as a plain string. */
static tree
-read_identifier (struct lto_input_block *ib)
+read_identifier (class lto_input_block *ib)
{
unsigned int len = strnlen (ib->data + ib->p, ib->len - ib->p - 1);
tree id;
@@ -1032,7 +1032,7 @@ read_identifier (struct lto_input_block *ib)
/* Return string encoded in IB, NULL if string is empty. */
static const char *
-read_string (struct lto_input_block *ib)
+read_string (class lto_input_block *ib)
{
unsigned int len = strnlen (ib->data + ib->p, ib->len - ib->p - 1);
const char *str;
@@ -1203,7 +1203,7 @@ get_alias_symbol (tree decl)
static struct cgraph_node *
input_node (struct lto_file_decl_data *file_data,
- struct lto_input_block *ib,
+ class lto_input_block *ib,
enum LTO_symtab_tags tag,
vec<symtab_node *> nodes)
{
@@ -1326,7 +1326,7 @@ input_node (struct lto_file_decl_data *file_data,
static varpool_node *
input_varpool_node (struct lto_file_decl_data *file_data,
- struct lto_input_block *ib)
+ class lto_input_block *ib)
{
int decl_index;
tree var_decl;
@@ -1402,7 +1402,7 @@ input_varpool_node (struct lto_file_decl_data *file_data,
Return the node read or overwritten. */
static void
-input_ref (struct lto_input_block *ib,
+input_ref (class lto_input_block *ib,
symtab_node *referring_node,
vec<symtab_node *> nodes)
{
@@ -1428,7 +1428,7 @@ input_ref (struct lto_input_block *ib,
indirect_unknown_callee set). */
static void
-input_edge (struct lto_input_block *ib, vec<symtab_node *> nodes,
+input_edge (class lto_input_block *ib, vec<symtab_node *> nodes,
bool indirect)
{
struct cgraph_node *caller, *callee;
@@ -1496,7 +1496,7 @@ input_edge (struct lto_input_block *ib, vec<symtab_node *> nodes,
static vec<symtab_node *>
input_cgraph_1 (struct lto_file_decl_data *file_data,
- struct lto_input_block *ib)
+ class lto_input_block *ib)
{
enum LTO_symtab_tags tag;
vec<symtab_node *> nodes = vNULL;
@@ -1573,7 +1573,7 @@ input_cgraph_1 (struct lto_file_decl_data *file_data,
/* Input ipa_refs. */
static void
-input_refs (struct lto_input_block *ib,
+input_refs (class lto_input_block *ib,
vec<symtab_node *> nodes)
{
int count;
@@ -1596,7 +1596,7 @@ input_refs (struct lto_input_block *ib,
/* Input profile_info from IB. */
static void
-input_profile_summary (struct lto_input_block *ib,
+input_profile_summary (class lto_input_block *ib,
struct lto_file_decl_data *file_data)
{
unsigned int runs = streamer_read_uhwi (ib);
@@ -1693,7 +1693,7 @@ input_symtab (void)
{
const char *data;
size_t len;
- struct lto_input_block *ib;
+ class lto_input_block *ib;
vec<symtab_node *> nodes;
ib = lto_create_simple_input_block (file_data, LTO_section_symtab_nodes,
@@ -1750,7 +1750,7 @@ input_offload_tables (bool do_force_output)
{
const char *data;
size_t len;
- struct lto_input_block *ib
+ class lto_input_block *ib
= lto_create_simple_input_block (file_data, LTO_section_offload_table,
&data, &len);
if (!ib)
@@ -1909,7 +1909,7 @@ output_cgraph_opt_summary (void)
static void
input_edge_opt_summary (struct cgraph_edge *edge ATTRIBUTE_UNUSED,
- struct lto_input_block *ib_main ATTRIBUTE_UNUSED)
+ class lto_input_block *ib_main ATTRIBUTE_UNUSED)
{
}
@@ -1917,8 +1917,8 @@ input_edge_opt_summary (struct cgraph_edge *edge ATTRIBUTE_UNUSED,
static void
input_node_opt_summary (struct cgraph_node *node,
- struct lto_input_block *ib_main,
- struct data_in *data_in)
+ class lto_input_block *ib_main,
+ class data_in *data_in)
{
int i;
int count;
@@ -1973,7 +1973,7 @@ input_cgraph_opt_section (struct lto_file_decl_data *file_data,
const int cfg_offset = sizeof (struct lto_function_header);
const int main_offset = cfg_offset + header->cfg_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
unsigned int count;
diff --git a/gcc/lto-section-in.c b/gcc/lto-section-in.c
index 80fdb03f4d4..4c2870176ae 100644
--- a/gcc/lto-section-in.c
+++ b/gcc/lto-section-in.c
@@ -228,7 +228,7 @@ lto_free_raw_section_data (struct lto_file_decl_data *file_data,
raw pointer to the section is returned in DATAR and LEN. These are
used to free the section. Return NULL if the section is not present. */
-struct lto_input_block *
+class lto_input_block *
lto_create_simple_input_block (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
const char **datar, size_t *len)
@@ -257,7 +257,7 @@ lto_create_simple_input_block (struct lto_file_decl_data *file_data,
void
lto_destroy_simple_input_block (struct lto_file_decl_data *file_data,
enum lto_section_type section_type,
- struct lto_input_block *ib,
+ class lto_input_block *ib,
const char *data, size_t len)
{
delete ib;
@@ -437,7 +437,7 @@ lto_free_function_in_decl_state_for_node (symtab_node *node)
/* Report read past end of the section. */
void
-lto_section_overrun (struct lto_input_block *ib)
+lto_section_overrun (class lto_input_block *ib)
{
fatal_error (input_location, "bytecode stream: trying to read %d bytes "
"after the end of the input buffer", ib->p - ib->len);
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 49fd5730c70..155805b3c43 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -84,7 +84,7 @@ lto_tag_check_set (enum LTO_tags actual, int ntags, ...)
/* Read LENGTH bytes from STREAM to ADDR. */
void
-lto_input_data_block (struct lto_input_block *ib, void *addr, size_t length)
+lto_input_data_block (class lto_input_block *ib, void *addr, size_t length)
{
size_t i;
unsigned char *const buffer = (unsigned char *) addr;
@@ -232,7 +232,7 @@ lto_location_cache::revert_location_cache ()
void
lto_location_cache::input_location (location_t *loc, struct bitpack_d *bp,
- struct data_in *data_in)
+ class data_in *data_in)
{
static const char *stream_file;
static int stream_line;
@@ -287,7 +287,7 @@ lto_location_cache::input_location (location_t *loc, struct bitpack_d *bp,
void
lto_input_location (location_t *loc, struct bitpack_d *bp,
- struct data_in *data_in)
+ class data_in *data_in)
{
data_in->location_cache.input_location (loc, bp, data_in);
}
@@ -297,7 +297,7 @@ lto_input_location (location_t *loc, struct bitpack_d *bp,
discarded. */
location_t
-stream_input_location_now (struct bitpack_d *bp, struct data_in *data_in)
+stream_input_location_now (struct bitpack_d *bp, class data_in *data_in)
{
location_t loc;
stream_input_location (&loc, bp, data_in);
@@ -313,7 +313,7 @@ stream_input_location_now (struct bitpack_d *bp, struct data_in *data_in)
function scope for the read tree. */
tree
-lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_tree_ref (class lto_input_block *ib, class data_in *data_in,
struct function *fn, enum LTO_tags tag)
{
unsigned HOST_WIDE_INT ix_u;
@@ -378,7 +378,7 @@ lto_input_tree_ref (struct lto_input_block *ib, struct data_in *data_in,
block IB, using descriptors in DATA_IN. */
static struct eh_catch_d *
-lto_input_eh_catch_list (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_eh_catch_list (class lto_input_block *ib, class data_in *data_in,
eh_catch *last_p)
{
eh_catch first;
@@ -424,7 +424,7 @@ lto_input_eh_catch_list (struct lto_input_block *ib, struct data_in *data_in,
in DATA_IN. */
static eh_region
-input_eh_region (struct lto_input_block *ib, struct data_in *data_in, int ix)
+input_eh_region (class lto_input_block *ib, class data_in *data_in, int ix)
{
enum LTO_tags tag;
eh_region r;
@@ -499,7 +499,7 @@ input_eh_region (struct lto_input_block *ib, struct data_in *data_in, int ix)
in DATA_IN. */
static eh_landing_pad
-input_eh_lp (struct lto_input_block *ib, struct data_in *data_in, int ix)
+input_eh_lp (class lto_input_block *ib, class data_in *data_in, int ix)
{
enum LTO_tags tag;
eh_landing_pad lp;
@@ -603,7 +603,7 @@ lto_init_eh (void)
in DATA_IN. */
static void
-input_eh_regions (struct lto_input_block *ib, struct data_in *data_in,
+input_eh_regions (class lto_input_block *ib, class data_in *data_in,
struct function *fn)
{
HOST_WIDE_INT i, root_region, len;
@@ -714,7 +714,7 @@ make_new_block (struct function *fn, unsigned int index)
/* Read the CFG for function FN from input block IB. */
static void
-input_cfg (struct lto_input_block *ib, struct data_in *data_in,
+input_cfg (class lto_input_block *ib, class data_in *data_in,
struct function *fn)
{
unsigned int bb_count;
@@ -807,7 +807,7 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
continue;
}
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = BASIC_BLOCK_FOR_FN (fn, header_index);
loop->header->loop_father = loop;
@@ -847,7 +847,7 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
block IB. */
static void
-input_ssa_names (struct lto_input_block *ib, struct data_in *data_in,
+input_ssa_names (class lto_input_block *ib, class data_in *data_in,
struct function *fn)
{
unsigned int i, size;
@@ -964,8 +964,8 @@ fixup_call_stmt_edges (struct cgraph_node *orig, gimple **stmts)
using input block IB. */
static void
-input_struct_function_base (struct function *fn, struct data_in *data_in,
- struct lto_input_block *ib)
+input_struct_function_base (struct function *fn, class data_in *data_in,
+ class lto_input_block *ib)
{
struct bitpack_d bp;
int len;
@@ -1029,8 +1029,8 @@ input_struct_function_base (struct function *fn, struct data_in *data_in,
/* Read the body of function FN_DECL from DATA_IN using input block IB. */
static void
-input_function (tree fn_decl, struct data_in *data_in,
- struct lto_input_block *ib, struct lto_input_block *ib_cfg)
+input_function (tree fn_decl, class data_in *data_in,
+ class lto_input_block *ib, class lto_input_block *ib_cfg)
{
struct function *fn;
enum LTO_tags tag;
@@ -1233,8 +1233,8 @@ input_function (tree fn_decl, struct data_in *data_in,
/* Read the body of function FN_DECL from DATA_IN using input block IB. */
static void
-input_constructor (tree var, struct data_in *data_in,
- struct lto_input_block *ib)
+input_constructor (tree var, class data_in *data_in,
+ class lto_input_block *ib)
{
DECL_INITIAL (var) = stream_read_tree (ib, data_in);
}
@@ -1251,7 +1251,7 @@ lto_read_body_or_constructor (struct lto_file_decl_data *file_data, struct symta
const char *data, enum lto_section_type section_type)
{
const struct lto_function_header *header;
- struct data_in *data_in;
+ class data_in *data_in;
int cfg_offset;
int main_offset;
int string_offset;
@@ -1364,7 +1364,7 @@ vec<dref_entry> dref_queue;
input block IB using the per-file context in DATA_IN. */
static void
-lto_read_tree_1 (struct lto_input_block *ib, struct data_in *data_in, tree expr)
+lto_read_tree_1 (class lto_input_block *ib, class data_in *data_in, tree expr)
{
/* Read all the bitfield values in EXPR. Note that for LTO, we
only write language-independent bitfields, so no more unpacking is
@@ -1402,7 +1402,7 @@ lto_read_tree_1 (struct lto_input_block *ib, struct data_in *data_in, tree expr)
input block IB using the per-file context in DATA_IN. */
static tree
-lto_read_tree (struct lto_input_block *ib, struct data_in *data_in,
+lto_read_tree (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag, hashval_t hash)
{
/* Instantiate a new tree node. */
@@ -1425,7 +1425,7 @@ lto_read_tree (struct lto_input_block *ib, struct data_in *data_in,
following in the IB, DATA_IN stream. */
hashval_t
-lto_input_scc (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_scc (class lto_input_block *ib, class data_in *data_in,
unsigned *len, unsigned *entry_len)
{
/* A blob of unnamed tree nodes, fill the cache from it and
@@ -1482,7 +1482,7 @@ lto_input_scc (struct lto_input_block *ib, struct data_in *data_in,
to previously read nodes. */
tree
-lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
+lto_input_tree_1 (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag, hashval_t hash)
{
tree result;
@@ -1532,7 +1532,7 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
}
tree
-lto_input_tree (struct lto_input_block *ib, struct data_in *data_in)
+lto_input_tree (class lto_input_block *ib, class data_in *data_in)
{
enum LTO_tags tag;
@@ -1564,7 +1564,7 @@ lto_input_toplevel_asms (struct lto_file_decl_data *file_data, int order_base)
const struct lto_simple_header_with_strings *header
= (const struct lto_simple_header_with_strings *) data;
int string_offset;
- struct data_in *data_in;
+ class data_in *data_in;
tree str;
if (! data)
@@ -1612,7 +1612,7 @@ lto_input_mode_table (struct lto_file_decl_data *file_data)
const struct lto_simple_header_with_strings *header
= (const struct lto_simple_header_with_strings *) data;
int string_offset;
- struct data_in *data_in;
+ class data_in *data_in;
string_offset = sizeof (*header) + header->main_size;
lto_input_block ib (data + sizeof (*header), header->main_size, NULL);
@@ -1727,12 +1727,12 @@ lto_reader_init (void)
table to use with LEN strings. RESOLUTIONS is the vector of linker
resolutions (NULL if not using a linker plugin). */
-struct data_in *
+class data_in *
lto_data_in_create (struct lto_file_decl_data *file_data, const char *strings,
unsigned len,
vec<ld_plugin_symbol_resolution_t> resolutions)
{
- struct data_in *data_in = new (struct data_in);
+ class data_in *data_in = new (class data_in);
data_in->file_data = file_data;
data_in->strings = strings;
data_in->strings_len = len;
@@ -1745,7 +1745,7 @@ lto_data_in_create (struct lto_file_decl_data *file_data, const char *strings,
/* Remove DATA_IN. */
void
-lto_data_in_delete (struct data_in *data_in)
+lto_data_in_delete (class data_in *data_in)
{
data_in->globals_resolution.release ();
streamer_tree_cache_delete (data_in->reader_cache);
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index 49ca5cecad1..35dcae4d589 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -1911,7 +1911,7 @@ output_cfg (struct output_block *ob, struct function *fn)
/* Output each loop, skipping the tree root which has number zero. */
for (unsigned i = 1; i < number_of_loops (fn); ++i)
{
- struct loop *loop = get_loop (fn, i);
+ class loop *loop = get_loop (fn, i);
/* Write the index of the loop header. That's enough to rebuild
the loop tree on the reader side. Stream -1 for an unused
diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h
index d5dbeb73b32..3c35d8a3f9a 100644
--- a/gcc/lto-streamer.h
+++ b/gcc/lto-streamer.h
@@ -308,7 +308,7 @@ public:
/* Tree merging did succeed; throw away recent changes. */
void revert_location_cache ();
void input_location (location_t *loc, struct bitpack_d *bp,
- struct data_in *data_in);
+ class data_in *data_in);
lto_location_cache ()
: loc_cache (), accepted_length (0), current_file (NULL), current_line (0),
current_col (0), current_sysp (false), current_loc (UNKNOWN_LOCATION)
@@ -759,13 +759,13 @@ public:
/* In lto-section-in.c */
-extern struct lto_input_block * lto_create_simple_input_block (
+extern class lto_input_block * lto_create_simple_input_block (
struct lto_file_decl_data *,
enum lto_section_type, const char **, size_t *);
extern void
lto_destroy_simple_input_block (struct lto_file_decl_data *,
enum lto_section_type,
- struct lto_input_block *, const char *, size_t);
+ class lto_input_block *, const char *, size_t);
extern void lto_set_in_hooks (struct lto_file_decl_data **,
lto_get_section_data_f *,
lto_free_section_data_f *);
@@ -795,7 +795,7 @@ extern struct lto_in_decl_state *lto_get_function_in_decl_state (
struct lto_file_decl_data *, tree);
extern void lto_free_function_in_decl_state (struct lto_in_decl_state *);
extern void lto_free_function_in_decl_state_for_node (symtab_node *);
-extern void lto_section_overrun (struct lto_input_block *) ATTRIBUTE_NORETURN;
+extern void lto_section_overrun (class lto_input_block *) ATTRIBUTE_NORETURN;
extern void lto_value_range_error (const char *,
HOST_WIDE_INT, HOST_WIDE_INT,
HOST_WIDE_INT) ATTRIBUTE_NORETURN;
@@ -860,23 +860,23 @@ extern void lto_input_constructors_and_inits (struct lto_file_decl_data *,
const char *);
extern void lto_input_toplevel_asms (struct lto_file_decl_data *, int);
extern void lto_input_mode_table (struct lto_file_decl_data *);
-extern struct data_in *lto_data_in_create (struct lto_file_decl_data *,
+extern class data_in *lto_data_in_create (struct lto_file_decl_data *,
const char *, unsigned,
vec<ld_plugin_symbol_resolution_t> );
-extern void lto_data_in_delete (struct data_in *);
-extern void lto_input_data_block (struct lto_input_block *, void *, size_t);
-void lto_input_location (location_t *, struct bitpack_d *, struct data_in *);
+extern void lto_data_in_delete (class data_in *);
+extern void lto_input_data_block (class lto_input_block *, void *, size_t);
+void lto_input_location (location_t *, struct bitpack_d *, class data_in *);
location_t stream_input_location_now (struct bitpack_d *bp,
- struct data_in *data);
-tree lto_input_tree_ref (struct lto_input_block *, struct data_in *,
+ class data_in *data);
+tree lto_input_tree_ref (class lto_input_block *, class data_in *,
struct function *, enum LTO_tags);
void lto_tag_check_set (enum LTO_tags, int, ...);
void lto_init_eh (void);
-hashval_t lto_input_scc (struct lto_input_block *, struct data_in *,
+hashval_t lto_input_scc (class lto_input_block *, class data_in *,
unsigned *, unsigned *);
-tree lto_input_tree_1 (struct lto_input_block *, struct data_in *,
+tree lto_input_tree_1 (class lto_input_block *, class data_in *,
enum LTO_tags, hashval_t hash);
-tree lto_input_tree (struct lto_input_block *, struct data_in *);
+tree lto_input_tree (class lto_input_block *, class data_in *);
/* In lto-streamer-out.c */
@@ -931,14 +931,14 @@ void select_what_to_stream (void);
void cl_target_option_stream_out (struct output_block *, struct bitpack_d *,
struct cl_target_option *);
-void cl_target_option_stream_in (struct data_in *,
+void cl_target_option_stream_in (class data_in *,
struct bitpack_d *,
struct cl_target_option *);
void cl_optimization_stream_out (struct output_block *,
struct bitpack_d *, struct cl_optimization *);
-void cl_optimization_stream_in (struct data_in *,
+void cl_optimization_stream_in (class data_in *,
struct bitpack_d *, struct cl_optimization *);
diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog
index f3796f56b77..83d166d94a4 100644
--- a/gcc/lto/ChangeLog
+++ b/gcc/lto/ChangeLog
@@ -1,6 +1,18 @@
2019-07-09 Martin Sebor <msebor@redhat.com>
PR c++/61339
+ * lto-common.c (lto_splay_tree_new): Change class-key of PODs
+ to struct and others to class.
+ (mentions_vars_p): Same.
+ (register_resolution): Same.
+ (lto_register_var_decl_in_symtab): Same.
+ (lto_register_function_decl_in_symtab): Same.
+ (cmp_tree): Same.
+ (lto_read_decls): Same.
+
+2019-07-09 Martin Sebor <msebor@redhat.com>
+
+ PR c++/61339
* lto-dump.c: Change class-key from class to struct and vice versa
to match convention and avoid -Wclass-is-pod and -Wstruct-no-pod.
diff --git a/gcc/lto/lto-common.c b/gcc/lto/lto-common.c
index 3c6d7b25ddb..c9d97815cfb 100644
--- a/gcc/lto/lto-common.c
+++ b/gcc/lto/lto-common.c
@@ -179,7 +179,7 @@ lto_splay_tree_new (void)
input. */
static const uint32_t *
-lto_read_in_decl_state (struct data_in *data_in, const uint32_t *data,
+lto_read_in_decl_state (class data_in *data_in, const uint32_t *data,
struct lto_in_decl_state *state)
{
uint32_t ix;
@@ -868,7 +868,7 @@ mentions_vars_p (tree t)
/* Return the resolution for the decl with index INDEX from DATA_IN. */
static enum ld_plugin_symbol_resolution
-get_resolution (struct data_in *data_in, unsigned index)
+get_resolution (class data_in *data_in, unsigned index)
{
if (data_in->globals_resolution.exists ())
{
@@ -911,7 +911,7 @@ register_resolution (struct lto_file_decl_data *file_data, tree decl,
different files. */
static void
-lto_register_var_decl_in_symtab (struct data_in *data_in, tree decl,
+lto_register_var_decl_in_symtab (class data_in *data_in, tree decl,
unsigned ix)
{
tree context;
@@ -936,7 +936,7 @@ lto_register_var_decl_in_symtab (struct data_in *data_in, tree decl,
file being read. */
static void
-lto_register_function_decl_in_symtab (struct data_in *data_in, tree decl,
+lto_register_function_decl_in_symtab (class data_in *data_in, tree decl,
unsigned ix)
{
/* If this variable has already been declared, queue the
@@ -949,7 +949,7 @@ lto_register_function_decl_in_symtab (struct data_in *data_in, tree decl,
/* Check if T is a decl and needs to register its resolution info. */
static void
-lto_maybe_register_decl (struct data_in *data_in, tree t, unsigned ix)
+lto_maybe_register_decl (class data_in *data_in, tree t, unsigned ix)
{
if (TREE_CODE (t) == VAR_DECL)
lto_register_var_decl_in_symtab (data_in, t, ix);
@@ -1624,7 +1624,7 @@ cmp_tree (const void *p1_, const void *p2_)
that was successful, otherwise return false. */
static bool
-unify_scc (struct data_in *data_in, unsigned from,
+unify_scc (class data_in *data_in, unsigned from,
unsigned len, unsigned scc_entry_len, hashval_t scc_hash)
{
bool unified_p = false;
@@ -1787,7 +1787,7 @@ lto_read_decls (struct lto_file_decl_data *decl_data, const void *data,
const int decl_offset = sizeof (struct lto_decl_header);
const int main_offset = decl_offset + header->decl_state_size;
const int string_offset = main_offset + header->main_size;
- struct data_in *data_in;
+ class data_in *data_in;
unsigned int i;
const uint32_t *data_ptr, *data_end;
uint32_t num_decl_states;
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index 9954ea5cfa1..c355594bb6b 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -211,7 +211,7 @@ static int sms_order_nodes (ddg_ptr, int, int *, int *);
static void set_node_sched_params (ddg_ptr);
static partial_schedule_ptr sms_schedule_by_order (ddg_ptr, int, int, int *);
static void permute_partial_schedule (partial_schedule_ptr, rtx_insn *);
-static void generate_prolog_epilog (partial_schedule_ptr, struct loop *,
+static void generate_prolog_epilog (partial_schedule_ptr, class loop *,
rtx, rtx);
static int calculate_stage_count (partial_schedule_ptr, int);
static void calculate_must_precede_follow (ddg_node_ptr, int, int,
@@ -1124,7 +1124,7 @@ duplicate_insns_of_cycles (partial_schedule_ptr ps, int from_stage,
/* Generate the instructions (including reg_moves) for prolog & epilog. */
static void
-generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
+generate_prolog_epilog (partial_schedule_ptr ps, class loop *loop,
rtx count_reg, rtx count_init)
{
int i;
@@ -1181,7 +1181,7 @@ generate_prolog_epilog (partial_schedule_ptr ps, struct loop *loop,
/* Mark LOOP as software pipelined so the later
scheduling passes don't touch it. */
static void
-mark_loop_unsched (struct loop *loop)
+mark_loop_unsched (class loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body (loop);
@@ -1195,7 +1195,7 @@ mark_loop_unsched (struct loop *loop)
/* Return true if all the BBs of the loop are empty except the
loop header. */
static bool
-loop_single_full_bb_p (struct loop *loop)
+loop_single_full_bb_p (class loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body (loop);
@@ -1251,7 +1251,7 @@ dump_insn_location (rtx_insn *insn)
/* Return true if the loop is in its canonical form and false if not.
i.e. it satisfies SIMPLE_SMS_LOOP_P and has one preheader block and a single exit. */
static bool
-loop_canon_p (struct loop *loop)
+loop_canon_p (class loop *loop)
{
if (loop->inner || !loop_outer (loop))
@@ -1294,7 +1294,7 @@ loop_canon_p (struct loop *loop)
make it one by splitting the first entry edge and
redirecting the others to the new BB. */
static void
-canon_loop (struct loop *loop)
+canon_loop (class loop *loop)
{
edge e;
edge_iterator i;
@@ -1346,7 +1346,7 @@ sms_schedule (void)
int maxii, max_asap;
partial_schedule_ptr ps;
basic_block bb = NULL;
- struct loop *loop;
+ class loop *loop;
basic_block condition_bb = NULL;
edge latch_edge;
HOST_WIDE_INT trip_count, max_trip_count;
diff --git a/gcc/omp-expand.c b/gcc/omp-expand.c
index 1de445b6911..c007ec168d5 100644
--- a/gcc/omp-expand.c
+++ b/gcc/omp-expand.c
@@ -2516,7 +2516,7 @@ expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
if (e2)
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = new_header;
loop->latch = e2->src;
add_loop (loop, body_bb->loop_father);
@@ -3477,14 +3477,14 @@ expand_omp_for_generic (struct omp_region *region,
/* We enter expand_omp_for_generic with a loop. This original loop may
have its own loop struct, or it may be part of an outer loop struct
(which may be the fake loop). */
- struct loop *outer_loop = entry_bb->loop_father;
+ class loop *outer_loop = entry_bb->loop_father;
bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;
add_bb_to_loop (l2_bb, outer_loop);
/* We've added a new loop around the original loop. Allocate the
corresponding loop struct. */
- struct loop *new_loop = alloc_loop ();
+ class loop *new_loop = alloc_loop ();
new_loop->header = l0_bb;
new_loop->latch = l2_bb;
add_loop (new_loop, outer_loop);
@@ -3494,7 +3494,7 @@ expand_omp_for_generic (struct omp_region *region,
if (!orig_loop_has_loop_struct
&& !gimple_omp_for_combined_p (fd->for_stmt))
{
- struct loop *orig_loop = alloc_loop ();
+ class loop *orig_loop = alloc_loop ();
orig_loop->header = l1_bb;
/* The loop may have multiple latches. */
add_loop (orig_loop, new_loop);
@@ -4356,7 +4356,7 @@ expand_omp_for_static_nochunk (struct omp_region *region,
set_immediate_dominator (CDI_DOMINATORS, exit3_bb, exit_bb);
}
- struct loop *loop = body_bb->loop_father;
+ class loop *loop = body_bb->loop_father;
if (loop != entry_bb->loop_father)
{
gcc_assert (broken_loop || loop->header == body_bb);
@@ -5104,8 +5104,8 @@ expand_omp_for_static_chunk (struct omp_region *region,
if (!broken_loop)
{
- struct loop *loop = body_bb->loop_father;
- struct loop *trip_loop = alloc_loop ();
+ class loop *loop = body_bb->loop_father;
+ class loop *trip_loop = alloc_loop ();
trip_loop->header = iter_part_bb;
trip_loop->latch = trip_update_bb;
add_loop (trip_loop, iter_part_bb->loop_father);
@@ -5523,7 +5523,7 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
if (!broken_loop)
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = l1_bb;
loop->latch = cont_bb;
add_loop (loop, l1_bb->loop_father);
@@ -5944,7 +5944,7 @@ expand_omp_taskloop_for_inner (struct omp_region *region,
if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
{
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = body_bb;
if (collapse_bb == NULL)
loop->latch = cont_bb;
@@ -6461,12 +6461,12 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
{
/* We now have one, two or three nested loops. Update the loop
structures. */
- struct loop *parent = entry_bb->loop_father;
- struct loop *body = body_bb->loop_father;
+ class loop *parent = entry_bb->loop_father;
+ class loop *body = body_bb->loop_father;
if (chunking)
{
- struct loop *chunk_loop = alloc_loop ();
+ class loop *chunk_loop = alloc_loop ();
chunk_loop->header = head_bb;
chunk_loop->latch = bottom_bb;
add_loop (chunk_loop, parent);
@@ -6482,7 +6482,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
if (parent)
{
- struct loop *body_loop = alloc_loop ();
+ class loop *body_loop = alloc_loop ();
body_loop->header = body_bb;
body_loop->latch = cont_bb;
add_loop (body_loop, parent);
@@ -6490,7 +6490,7 @@ expand_oacc_for (struct omp_region *region, struct omp_for_data *fd)
if (fd->tiling)
{
/* Insert tiling's element loop. */
- struct loop *inner_loop = alloc_loop ();
+ class loop *inner_loop = alloc_loop ();
inner_loop->header = elem_body_bb;
inner_loop->latch = elem_cont_bb;
add_loop (inner_loop, body_loop);
@@ -7475,7 +7475,7 @@ expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
/* Remove GIMPLE_OMP_ATOMIC_STORE. */
gsi_remove (&si, true);
- struct loop *loop = alloc_loop ();
+ class loop *loop = alloc_loop ();
loop->header = loop_header;
loop->latch = store_bb;
add_loop (loop, loop_header->loop_father);
@@ -7625,14 +7625,14 @@ static void
mark_loops_in_oacc_kernels_region (basic_block region_entry,
basic_block region_exit)
{
- struct loop *outer = region_entry->loop_father;
+ class loop *outer = region_entry->loop_father;
gcc_assert (region_exit == NULL || outer == region_exit->loop_father);
/* Don't parallelize the kernels region if it contains more than one outer
loop. */
unsigned int nr_outer_loops = 0;
- struct loop *single_outer = NULL;
- for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next)
+ class loop *single_outer = NULL;
+ for (class loop *loop = outer->inner; loop != NULL; loop = loop->next)
{
gcc_assert (loop_outer (loop) == outer);
@@ -7649,14 +7649,14 @@ mark_loops_in_oacc_kernels_region (basic_block region_entry,
if (nr_outer_loops != 1)
return;
- for (struct loop *loop = single_outer->inner;
+ for (class loop *loop = single_outer->inner;
loop != NULL;
loop = loop->inner)
if (loop->next)
return;
/* Mark the loops in the region. */
- for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner)
+ for (class loop *loop = single_outer; loop != NULL; loop = loop->inner)
loop->in_oacc_kernels_region = true;
}
diff --git a/gcc/omp-offload.c b/gcc/omp-offload.c
index c8a281c6d28..da788d9f514 100644
--- a/gcc/omp-offload.c
+++ b/gcc/omp-offload.c
@@ -389,8 +389,8 @@ oacc_xform_loop (gcall *call)
|| !global_options_set.x_flag_tree_loop_vectorize))
{
basic_block bb = gsi_bb (gsi);
- struct loop *parent = bb->loop_father;
- struct loop *body = parent->inner;
+ class loop *parent = bb->loop_father;
+ class loop *body = parent->inner;
parent->force_vectorize = true;
parent->safelen = INT_MAX;
diff --git a/gcc/omp-simd-clone.c b/gcc/omp-simd-clone.c
index 472e2025e19..caa8da3cba5 100644
--- a/gcc/omp-simd-clone.c
+++ b/gcc/omp-simd-clone.c
@@ -1194,7 +1194,7 @@ simd_clone_adjust (struct cgraph_node *node)
gimple *g;
basic_block incr_bb = NULL;
- struct loop *loop = NULL;
+ class loop *loop = NULL;
/* Create a new BB right before the original exit BB, to hold the
iteration increment and the condition/branch. */
diff --git a/gcc/optabs-query.c b/gcc/optabs-query.c
index 4116bfe45da..2a066960e22 100644
--- a/gcc/optabs-query.c
+++ b/gcc/optabs-query.c
@@ -120,7 +120,7 @@ get_traditional_extraction_insn (extraction_insn *insn,
POS_OP is the operand number of the bit position. */
static bool
-get_optab_extraction_insn (struct extraction_insn *insn,
+get_optab_extraction_insn (class extraction_insn *insn,
enum extraction_type type,
machine_mode mode, direct_optab reg_optab,
direct_optab misalign_optab, int pos_op)
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 18ca7370917..193cd9135b7 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -250,7 +250,7 @@ rtx
expand_widen_pattern_expr (sepops ops, rtx op0, rtx op1, rtx wide_op,
rtx target, int unsignedp)
{
- struct expand_operand eops[4];
+ class expand_operand eops[4];
tree oprnd0, oprnd1, oprnd2;
machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
optab widen_pattern_optab;
@@ -344,7 +344,7 @@ rtx
expand_ternary_op (machine_mode mode, optab ternary_optab, rtx op0,
rtx op1, rtx op2, rtx target, int unsignedp)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode = optab_handler (ternary_optab, mode);
gcc_assert (optab_handler (ternary_optab, mode) != CODE_FOR_nothing);
@@ -413,7 +413,7 @@ expand_vector_broadcast (machine_mode vmode, rtx op)
insn_code icode = optab_handler (vec_duplicate_optab, vmode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
create_output_operand (&ops[0], NULL_RTX, vmode);
create_input_operand (&ops[1], op, GET_MODE (op));
expand_insn (icode, 2, ops);
@@ -1039,7 +1039,7 @@ expand_binop_directly (enum insn_code icode, machine_mode mode, optab binoptab,
machine_mode xmode0 = insn_data[(int) icode].operand[1].mode;
machine_mode xmode1 = insn_data[(int) icode].operand[2].mode;
machine_mode mode0, mode1, tmp_mode;
- struct expand_operand ops[3];
+ class expand_operand ops[3];
bool commutative_p;
rtx_insn *pat;
rtx xop0 = op0, xop1 = op1;
@@ -2012,7 +2012,7 @@ expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
enum insn_code icode = optab_handler (unoptab, mode);
create_fixed_operand (&ops[0], targ0);
@@ -2084,7 +2084,7 @@ expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
if (optab_handler (binoptab, mode) != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode = optab_handler (binoptab, mode);
machine_mode mode0 = insn_data[icode].operand[1].mode;
machine_mode mode1 = insn_data[icode].operand[2].mode;
@@ -2724,7 +2724,7 @@ expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
{
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
enum insn_code icode = optab_handler (unoptab, mode);
rtx_insn *last = get_last_insn ();
rtx_insn *pat;
@@ -3578,7 +3578,7 @@ bool
maybe_emit_unop_insn (enum insn_code icode, rtx target, rtx op0,
enum rtx_code code)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
rtx_insn *pat;
create_output_operand (&ops[0], target, GET_MODE (target));
@@ -4289,7 +4289,7 @@ emit_indirect_jump (rtx loc)
sorry ("indirect jumps are not available on this target");
else
{
- struct expand_operand ops[1];
+ class expand_operand ops[1];
create_address_operand (&ops[0], loc);
expand_jump_insn (targetm.code_for_indirect_jump, 1, ops);
emit_barrier ();
@@ -4394,7 +4394,7 @@ emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
OPTAB_WIDEN, &comparison, &cmpmode);
if (comparison)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], comparison);
@@ -4460,7 +4460,7 @@ emit_conditional_neg_or_complement (rtx target, rtx_code code,
target = gen_reg_rtx (mode);
rtx_insn *last = get_last_insn ();
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], cond);
@@ -4548,7 +4548,7 @@ emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
&comparison, &cmode);
if (comparison)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], comparison);
@@ -5414,7 +5414,7 @@ vector_compare_rtx (machine_mode cmp_mode, enum tree_code tcode,
tree t_op0, tree t_op1, bool unsignedp,
enum insn_code icode, unsigned int opno)
{
- struct expand_operand ops[2];
+ class expand_operand ops[2];
rtx rtx_op0, rtx_op1;
machine_mode m0, m1;
enum rtx_code rcode = get_rtx_code (tcode, unsignedp);
@@ -5509,7 +5509,7 @@ expand_vec_perm_1 (enum insn_code icode, rtx target,
{
machine_mode tmode = GET_MODE (target);
machine_mode smode = GET_MODE (sel);
- struct expand_operand ops[4];
+ class expand_operand ops[4];
gcc_assert (GET_MODE_CLASS (smode) == MODE_VECTOR_INT
|| mode_for_int_vector (tmode).require () == smode);
@@ -5596,7 +5596,7 @@ expand_vec_perm_const (machine_mode mode, rtx v0, rtx v1,
rtx shift_amt = shift_amt_for_vec_perm_mask (mode, indices, shift_optab);
if (shift_amt)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
if (shift_code != CODE_FOR_nothing)
{
create_output_operand (&ops[0], target, mode);
@@ -5782,7 +5782,7 @@ rtx
expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
rtx target)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
machine_mode mode = TYPE_MODE (vec_cond_type);
machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
@@ -5814,7 +5814,7 @@ rtx
expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
rtx target)
{
- struct expand_operand ops[6];
+ class expand_operand ops[6];
enum insn_code icode;
rtx comparison, rtx_op1, rtx_op2;
machine_mode mode = TYPE_MODE (vec_cond_type);
@@ -5884,7 +5884,7 @@ expand_vec_cond_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
rtx
expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
enum insn_code icode;
machine_mode emode = GET_MODE_INNER (vmode);
@@ -5904,7 +5904,7 @@ expand_vec_series_expr (machine_mode vmode, rtx op0, rtx op1, rtx target)
rtx
expand_vec_cmp_expr (tree type, tree exp, rtx target)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode;
rtx comparison;
machine_mode mask_mode = TYPE_MODE (type);
@@ -5945,7 +5945,7 @@ rtx
expand_mult_highpart (machine_mode mode, rtx op0, rtx op1,
rtx target, bool uns_p)
{
- struct expand_operand eops[3];
+ class expand_operand eops[3];
enum insn_code icode;
int method, i;
machine_mode wmode;
@@ -6098,7 +6098,7 @@ maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
icode = direct_optab_handler (atomic_exchange_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[4];
+ class expand_operand ops[4];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], mem);
@@ -6136,7 +6136,7 @@ maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
create_output_operand (&ops[0], target, mode);
create_fixed_operand (&ops[1], mem);
create_input_operand (&ops[2], val, mode);
@@ -6196,7 +6196,7 @@ static rtx
maybe_emit_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
{
machine_mode pat_bool_mode;
- struct expand_operand ops[3];
+ class expand_operand ops[3];
if (!targetm.have_atomic_test_and_set ())
return NULL_RTX;
@@ -6366,7 +6366,7 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval,
enum memmodel fail_model)
{
machine_mode mode = GET_MODE (mem);
- struct expand_operand ops[8];
+ class expand_operand ops[8];
enum insn_code icode;
rtx target_oval, target_bool = NULL_RTX;
rtx libfunc;
@@ -6568,7 +6568,7 @@ expand_atomic_load (rtx target, rtx mem, enum memmodel model)
icode = direct_optab_handler (atomic_load_optab, mode);
if (icode != CODE_FOR_nothing)
{
- struct expand_operand ops[3];
+ class expand_operand ops[3];
rtx_insn *last = get_last_insn ();
if (is_mm_seq_cst (model))
expand_memory_blockage ();
@@ -6621,7 +6621,7 @@ expand_atomic_store (rtx mem, rtx val, enum memmodel model, bool use_release)
{
machine_mode mode = GET_MODE (mem);
enum insn_code icode;
- struct expand_operand ops[3];
+ class expand_operand ops[3];
/* If the target supports the store directly, great. */
icode = direct_optab_handler (atomic_store_optab, mode);
@@ -6831,7 +6831,7 @@ maybe_emit_op (const struct atomic_op_functions *optab, rtx target, rtx mem,
rtx val, bool use_memmodel, enum memmodel model, bool after)
{
machine_mode mode = GET_MODE (mem);
- struct expand_operand ops[4];
+ class expand_operand ops[4];
enum insn_code icode;
int op_counter = 0;
int num_ops;
@@ -7145,7 +7145,7 @@ valid_multiword_target_p (rtx target)
of that rtx if so. */
void
-create_integer_operand (struct expand_operand *op, poly_int64 intval)
+create_integer_operand (class expand_operand *op, poly_int64 intval)
{
create_expand_operand (op, EXPAND_INTEGER,
gen_int_mode (intval, MAX_MODE_INT),
@@ -7157,7 +7157,7 @@ create_integer_operand (struct expand_operand *op, poly_int64 intval)
static bool
maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
- struct expand_operand *op)
+ class expand_operand *op)
{
/* See if the operand matches in its current form. */
if (insn_operand_matches (icode, opno, op->value))
@@ -7199,7 +7199,7 @@ maybe_legitimize_operand_same_code (enum insn_code icode, unsigned int opno,
static bool
maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
- struct expand_operand *op)
+ class expand_operand *op)
{
machine_mode mode, imode;
bool old_volatile_ok, result;
@@ -7281,7 +7281,7 @@ maybe_legitimize_operand (enum insn_code icode, unsigned int opno,
TYPE is the type of VALUE. */
void
-create_convert_operand_from_type (struct expand_operand *op,
+create_convert_operand_from_type (class expand_operand *op,
rtx value, tree type)
{
create_convert_operand_from (op, value, TYPE_MODE (type),
@@ -7296,8 +7296,8 @@ create_convert_operand_from_type (struct expand_operand *op,
static inline bool
can_reuse_operands_p (enum insn_code icode,
unsigned int opno1, unsigned int opno2,
- const struct expand_operand *op1,
- const struct expand_operand *op2)
+ const class expand_operand *op1,
+ const class expand_operand *op2)
{
/* Check requirements that are common to all types. */
if (op1->type != op2->type
@@ -7332,7 +7332,7 @@ can_reuse_operands_p (enum insn_code icode,
bool
maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
- unsigned int nops, struct expand_operand *ops)
+ unsigned int nops, class expand_operand *ops)
{
rtx_insn *last = get_last_insn ();
rtx *orig_values = XALLOCAVEC (rtx, nops);
@@ -7374,7 +7374,7 @@ maybe_legitimize_operands (enum insn_code icode, unsigned int opno,
rtx_insn *
maybe_gen_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
gcc_assert (nops == (unsigned int) insn_data[(int) icode].n_generator_args);
if (!maybe_legitimize_operands (icode, 0, nops, ops))
@@ -7418,7 +7418,7 @@ maybe_gen_insn (enum insn_code icode, unsigned int nops,
bool
maybe_expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
if (pat)
@@ -7433,7 +7433,7 @@ maybe_expand_insn (enum insn_code icode, unsigned int nops,
bool
maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
rtx_insn *pat = maybe_gen_insn (icode, nops, ops);
if (pat)
@@ -7449,7 +7449,7 @@ maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
void
expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
if (!maybe_expand_insn (icode, nops, ops))
gcc_unreachable ();
@@ -7459,7 +7459,7 @@ expand_insn (enum insn_code icode, unsigned int nops,
void
expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops)
+ class expand_operand *ops)
{
if (!maybe_expand_jump_insn (icode, nops, ops))
gcc_unreachable ();
diff --git a/gcc/optabs.h b/gcc/optabs.h
index ca7e06366f3..0654107d6e3 100644
--- a/gcc/optabs.h
+++ b/gcc/optabs.h
@@ -71,7 +71,7 @@ public:
to their default values. */
static inline void
-create_expand_operand (struct expand_operand *op,
+create_expand_operand (class expand_operand *op,
enum expand_operand_type type,
rtx value, machine_mode mode,
bool unsigned_p, poly_int64 int_value = 0)
@@ -87,7 +87,7 @@ create_expand_operand (struct expand_operand *op,
/* Make OP describe an operand that must use rtx X, even if X is volatile. */
static inline void
-create_fixed_operand (struct expand_operand *op, rtx x)
+create_fixed_operand (class expand_operand *op, rtx x)
{
create_expand_operand (op, EXPAND_FIXED, x, VOIDmode, false);
}
@@ -98,7 +98,7 @@ create_fixed_operand (struct expand_operand *op, rtx x)
be ignored in that case. */
static inline void
-create_output_operand (struct expand_operand *op, rtx x,
+create_output_operand (class expand_operand *op, rtx x,
machine_mode mode)
{
create_expand_operand (op, EXPAND_OUTPUT, x, mode, false);
@@ -110,7 +110,7 @@ create_output_operand (struct expand_operand *op, rtx x,
as an operand. */
static inline void
-create_input_operand (struct expand_operand *op, rtx value,
+create_input_operand (class expand_operand *op, rtx value,
machine_mode mode)
{
create_expand_operand (op, EXPAND_INPUT, value, mode, false);
@@ -120,7 +120,7 @@ create_input_operand (struct expand_operand *op, rtx value,
to mode MODE. UNSIGNED_P says whether VALUE is unsigned. */
static inline void
-create_convert_operand_to (struct expand_operand *op, rtx value,
+create_convert_operand_to (class expand_operand *op, rtx value,
machine_mode mode, bool unsigned_p)
{
create_expand_operand (op, EXPAND_CONVERT_TO, value, mode, unsigned_p);
@@ -132,7 +132,7 @@ create_convert_operand_to (struct expand_operand *op, rtx value,
UNSIGNED_P says whether VALUE is unsigned. */
static inline void
-create_convert_operand_from (struct expand_operand *op, rtx value,
+create_convert_operand_from (class expand_operand *op, rtx value,
machine_mode mode, bool unsigned_p)
{
create_expand_operand (op, EXPAND_CONVERT_FROM, value, mode, unsigned_p);
@@ -143,12 +143,12 @@ create_convert_operand_from (struct expand_operand *op, rtx value,
of the address, but it may need to be converted to Pmode first. */
static inline void
-create_address_operand (struct expand_operand *op, rtx value)
+create_address_operand (class expand_operand *op, rtx value)
{
create_expand_operand (op, EXPAND_ADDRESS, value, Pmode, false);
}
-extern void create_integer_operand (struct expand_operand *, poly_int64);
+extern void create_integer_operand (class expand_operand *, poly_int64);
/* Passed to expand_simple_binop and expand_binop to say which options
to try to use if the requested operation can't be open-coded on the
@@ -336,21 +336,21 @@ rtx expand_atomic_fetch_op (rtx, rtx, rtx, enum rtx_code, enum memmodel,
extern bool insn_operand_matches (enum insn_code icode, unsigned int opno,
rtx operand);
extern bool valid_multiword_target_p (rtx);
-extern void create_convert_operand_from_type (struct expand_operand *op,
+extern void create_convert_operand_from_type (class expand_operand *op,
rtx value, tree type);
extern bool maybe_legitimize_operands (enum insn_code icode,
unsigned int opno, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern rtx_insn *maybe_gen_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern bool maybe_expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern bool maybe_expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern void expand_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern void expand_jump_insn (enum insn_code icode, unsigned int nops,
- struct expand_operand *ops);
+ class expand_operand *ops);
extern enum rtx_code get_rtx_code (enum tree_code tcode, bool unsignedp);
diff --git a/gcc/optinfo.h b/gcc/optinfo.h
index 670b09a5e28..04786b4c03b 100644
--- a/gcc/optinfo.h
+++ b/gcc/optinfo.h
@@ -65,7 +65,7 @@ along with GCC; see the file COPYING3. If not see
/* Forward decls. */
-struct opt_pass;
+class opt_pass;
class optinfo_item;
/* Return true if any of the active optinfo destinations make use
diff --git a/gcc/poly-int.h b/gcc/poly-int.h
index 635f1ebeef6..0ccdf680f43 100644
--- a/gcc/poly-int.h
+++ b/gcc/poly-int.h
@@ -29,7 +29,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef HAVE_POLY_INT_H
#define HAVE_POLY_INT_H
-template<unsigned int N, typename T> class poly_int_pod;
+template<unsigned int N, typename T> struct poly_int_pod;
template<unsigned int N, typename T> class poly_int;
/* poly_coeff_traits<T> describes the properties of a poly_int
diff --git a/gcc/predict.c b/gcc/predict.c
index 766f418aa69..07f66aab7a3 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -87,10 +87,10 @@ static void dump_prediction (FILE *, enum br_predictor, int, basic_block,
enum predictor_reason, edge);
static void predict_paths_leading_to (basic_block, enum br_predictor,
enum prediction,
- struct loop *in_loop = NULL);
+ class loop *in_loop = NULL);
static void predict_paths_leading_to_edge (edge, enum br_predictor,
enum prediction,
- struct loop *in_loop = NULL);
+ class loop *in_loop = NULL);
static bool can_predict_insn_p (const rtx_insn *);
static HOST_WIDE_INT get_predictor_value (br_predictor, HOST_WIDE_INT);
static void determine_unlikely_bbs ();
@@ -355,7 +355,7 @@ optimize_insn_for_speed_p (void)
/* Return TRUE when LOOP should be optimized for size. */
bool
-optimize_loop_for_size_p (struct loop *loop)
+optimize_loop_for_size_p (class loop *loop)
{
return optimize_bb_for_size_p (loop->header);
}
@@ -363,7 +363,7 @@ optimize_loop_for_size_p (struct loop *loop)
/* Return TRUE when LOOP should be optimized for speed. */
bool
-optimize_loop_for_speed_p (struct loop *loop)
+optimize_loop_for_speed_p (class loop *loop)
{
return optimize_bb_for_speed_p (loop->header);
}
@@ -371,9 +371,9 @@ optimize_loop_for_speed_p (struct loop *loop)
/* Return TRUE when LOOP nest should be optimized for speed. */
bool
-optimize_loop_nest_for_speed_p (struct loop *loop)
+optimize_loop_nest_for_speed_p (class loop *loop)
{
- struct loop *l = loop;
+ class loop *l = loop;
if (optimize_loop_for_speed_p (loop))
return true;
l = loop->inner;
@@ -399,7 +399,7 @@ optimize_loop_nest_for_speed_p (struct loop *loop)
/* Return TRUE when LOOP nest should be optimized for size. */
bool
-optimize_loop_nest_for_size_p (struct loop *loop)
+optimize_loop_nest_for_size_p (class loop *loop)
{
return !optimize_loop_nest_for_speed_p (loop);
}
@@ -1471,7 +1471,7 @@ get_base_value (tree t)
Otherwise return false and set LOOP_INVARIANT to NULL. */
static bool
-is_comparison_with_loop_invariant_p (gcond *stmt, struct loop *loop,
+is_comparison_with_loop_invariant_p (gcond *stmt, class loop *loop,
tree *loop_invariant,
enum tree_code *compare_code,
tree *loop_step,
@@ -1637,7 +1637,7 @@ predicted_by_loop_heuristics_p (basic_block bb)
In this loop, we will predict the branch inside the loop to be taken. */
static void
-predict_iv_comparison (struct loop *loop, basic_block bb,
+predict_iv_comparison (class loop *loop, basic_block bb,
tree loop_bound_var,
tree loop_iv_base_var,
enum tree_code loop_bound_code,
@@ -1896,9 +1896,9 @@ predict_extra_loop_exits (edge exit_edge)
static void
predict_loops (void)
{
- struct loop *loop;
+ class loop *loop;
basic_block bb;
- hash_set <struct loop *> with_recursion(10);
+ hash_set <class loop *> with_recursion(10);
FOR_EACH_BB_FN (bb, cfun)
{
@@ -1923,9 +1923,9 @@ predict_loops (void)
basic_block bb, *bbs;
unsigned j, n_exits = 0;
vec<edge> exits;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
edge ex;
- struct nb_iter_bound *nb_iter;
+ class nb_iter_bound *nb_iter;
enum tree_code loop_bound_code = ERROR_MARK;
tree loop_bound_step = NULL;
tree loop_bound_var = NULL;
@@ -3135,7 +3135,7 @@ static void
predict_paths_for_bb (basic_block cur, basic_block bb,
enum br_predictor pred,
enum prediction taken,
- bitmap visited, struct loop *in_loop = NULL)
+ bitmap visited, class loop *in_loop = NULL)
{
edge e;
edge_iterator ei;
@@ -3201,7 +3201,7 @@ predict_paths_for_bb (basic_block cur, basic_block bb,
static void
predict_paths_leading_to (basic_block bb, enum br_predictor pred,
- enum prediction taken, struct loop *in_loop)
+ enum prediction taken, class loop *in_loop)
{
predict_paths_for_bb (bb, bb, pred, taken, auto_bitmap (), in_loop);
}
@@ -3210,7 +3210,7 @@ predict_paths_leading_to (basic_block bb, enum br_predictor pred,
static void
predict_paths_leading_to_edge (edge e, enum br_predictor pred,
- enum prediction taken, struct loop *in_loop)
+ enum prediction taken, class loop *in_loop)
{
bool has_nonloop_edge = false;
edge_iterator ei;
@@ -3400,9 +3400,9 @@ propagate_freq (basic_block head, bitmap tovisit)
/* Estimate frequencies in loops at same nest level. */
static void
-estimate_loops_at_level (struct loop *first_loop)
+estimate_loops_at_level (class loop *first_loop)
{
- struct loop *loop;
+ class loop *loop;
for (loop = first_loop; loop; loop = loop->next)
{
@@ -4052,7 +4052,7 @@ pass_profile::execute (function *fun)
profile_status_for_fn (fun) = PROFILE_GUESSED;
if (dump_file && (dump_flags & TDF_DETAILS))
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
if (loop->header->count.initialized_p ())
fprintf (dump_file, "Loop got predicted %d to iterate %i times.\n",
diff --git a/gcc/predict.h b/gcc/predict.h
index c1f2f0307dd..5149a97cce4 100644
--- a/gcc/predict.h
+++ b/gcc/predict.h
@@ -68,10 +68,10 @@ extern bool optimize_edge_for_size_p (edge);
extern bool optimize_edge_for_speed_p (edge);
extern bool optimize_insn_for_size_p (void);
extern bool optimize_insn_for_speed_p (void);
-extern bool optimize_loop_for_size_p (struct loop *);
-extern bool optimize_loop_for_speed_p (struct loop *);
-extern bool optimize_loop_nest_for_speed_p (struct loop *);
-extern bool optimize_loop_nest_for_size_p (struct loop *);
+extern bool optimize_loop_for_size_p (class loop *);
+extern bool optimize_loop_for_speed_p (class loop *);
+extern bool optimize_loop_nest_for_speed_p (class loop *);
+extern bool optimize_loop_nest_for_size_p (class loop *);
extern bool predictable_edge_p (edge);
extern void rtl_profile_for_bb (basic_block);
extern void rtl_profile_for_edge (edge);
diff --git a/gcc/profile-count.c b/gcc/profile-count.c
index 2b774a77802..6198675eb89 100644
--- a/gcc/profile-count.c
+++ b/gcc/profile-count.c
@@ -125,7 +125,7 @@ profile_count::differs_from_p (profile_count other) const
/* Stream THIS from IB. */
profile_count
-profile_count::stream_in (struct lto_input_block *ib)
+profile_count::stream_in (class lto_input_block *ib)
{
profile_count ret;
ret.m_val = streamer_read_gcov_count (ib);
@@ -216,7 +216,7 @@ profile_probability::differs_lot_from_p (profile_probability other) const
/* Stream THIS from IB. */
profile_probability
-profile_probability::stream_in (struct lto_input_block *ib)
+profile_probability::stream_in (class lto_input_block *ib)
{
profile_probability ret;
ret.m_val = streamer_read_uhwi (ib);
diff --git a/gcc/profile-count.h b/gcc/profile-count.h
index e584aab641f..ef84ddcc535 100644
--- a/gcc/profile-count.h
+++ b/gcc/profile-count.h
@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see
#define GCC_PROFILE_COUNT_H
struct function;
-class profile_count;
+struct profile_count;
/* Quality of the profile count. Because gengtype does not support enums
inside of classes, this is in global namespace. */
@@ -154,7 +154,7 @@ class GTY((user)) profile_probability
uint32_t m_val : 29;
enum profile_quality m_quality : 3;
- friend class profile_count;
+ friend struct profile_count;
public:
profile_probability (): m_val (uninitialized_probability),
m_quality (GUESSED)
@@ -615,7 +615,7 @@ public:
profile_count count2) const;
/* LTO streaming support. */
- static profile_probability stream_in (struct lto_input_block *);
+ static profile_probability stream_in (class lto_input_block *);
void stream_out (struct output_block *);
void stream_out (struct lto_output_stream *);
};
@@ -1201,7 +1201,7 @@ public:
profile_quality quality = PRECISE);
/* LTO streaming support. */
- static profile_count stream_in (struct lto_input_block *);
+ static profile_count stream_in (class lto_input_block *);
void stream_out (struct output_block *);
void stream_out (struct lto_output_stream *);
};
diff --git a/gcc/profile.c b/gcc/profile.c
index e3f8c5542be..441cb8eb183 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -1370,7 +1370,7 @@ branch_prob (bool thunk)
if (flag_branch_probabilities
&& (profile_status_for_fn (cfun) == PROFILE_READ))
{
- struct loop *loop;
+ class loop *loop;
if (dump_file && (dump_flags & TDF_DETAILS))
report_predictor_hitrates ();
diff --git a/gcc/regrename.c b/gcc/regrename.c
index 7dbbeb91a6d..73c0ceda341 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -116,7 +116,7 @@ static unsigned current_id;
static vec<du_head_p> id_to_chain;
/* List of currently open chains. */
-static struct du_head *open_chains;
+static class du_head *open_chains;
/* Bitmap of open chains. The bits set always match the list found in
open_chains. */
@@ -135,7 +135,7 @@ static HARD_REG_SET live_hard_regs;
static operand_rr_info *cur_operand;
/* Set while scanning RTL if a register dies. Used to tie chains. */
-static struct du_head *terminated_this_insn;
+static class du_head *terminated_this_insn;
/* Return the chain corresponding to id number ID. Take into account that
chains may have been merged. */
@@ -192,7 +192,7 @@ free_chain_data (void)
another chain whose id is ID. */
static void
-mark_conflict (struct du_head *chains, unsigned id)
+mark_conflict (class du_head *chains, unsigned id)
{
while (chains)
{
@@ -205,7 +205,7 @@ mark_conflict (struct du_head *chains, unsigned id)
use THIS_DU which is part of the chain HEAD. */
static void
-record_operand_use (struct du_head *head, struct du_chain *this_du)
+record_operand_use (class du_head *head, struct du_chain *this_du)
{
if (cur_operand == NULL || cur_operand->failed)
return;
@@ -227,7 +227,7 @@ static du_head_p
create_new_chain (unsigned this_regno, unsigned this_nregs, rtx *loc,
rtx_insn *insn, enum reg_class cl)
{
- struct du_head *head = XOBNEW (&rename_obstack, struct du_head);
+ class du_head *head = XOBNEW (&rename_obstack, class du_head);
struct du_chain *this_du;
int nregs;
@@ -288,7 +288,7 @@ create_new_chain (unsigned this_regno, unsigned this_nregs, rtx *loc,
set the corresponding bits in *PSET. */
static void
-merge_overlapping_regs (HARD_REG_SET *pset, struct du_head *head)
+merge_overlapping_regs (HARD_REG_SET *pset, class du_head *head)
{
bitmap_iterator bi;
unsigned i;
@@ -309,7 +309,7 @@ merge_overlapping_regs (HARD_REG_SET *pset, struct du_head *head)
static bool
check_new_reg_p (int reg ATTRIBUTE_UNUSED, int new_reg,
- struct du_head *this_head, HARD_REG_SET this_unavailable)
+ class du_head *this_head, HARD_REG_SET this_unavailable)
{
machine_mode mode = GET_MODE (*this_head->first->loc);
int nregs = hard_regno_nregs (new_reg, mode);
@@ -561,7 +561,7 @@ public:
/* Initialize a rename_info structure P for basic block BB, which starts a new
scan. */
static void
-init_rename_info (struct bb_rename_info *p, basic_block bb)
+init_rename_info (class bb_rename_info *p, basic_block bb)
{
int i;
df_ref def;
@@ -616,7 +616,7 @@ init_rename_info (struct bb_rename_info *p, basic_block bb)
/* Record in RI that the block corresponding to it has an incoming
live value, described by CHAIN. */
static void
-set_incoming_from_chain (struct bb_rename_info *ri, du_head_p chain)
+set_incoming_from_chain (class bb_rename_info *ri, du_head_p chain)
{
int i;
int incoming_nregs = ri->incoming[chain->regno].nregs;
@@ -690,7 +690,7 @@ merge_chains (du_head_p c1, du_head_p c2)
void
regrename_analyze (bitmap bb_mask)
{
- struct bb_rename_info *rename_info;
+ class bb_rename_info *rename_info;
int i;
basic_block bb;
int n_bbs;
@@ -700,11 +700,11 @@ regrename_analyze (bitmap bb_mask)
n_bbs = pre_and_rev_post_order_compute (NULL, inverse_postorder, false);
/* Gather some information about the blocks in this function. */
- rename_info = XCNEWVEC (struct bb_rename_info, n_basic_blocks_for_fn (cfun));
+ rename_info = XCNEWVEC (class bb_rename_info, n_basic_blocks_for_fn (cfun));
i = 0;
FOR_EACH_BB_FN (bb, cfun)
{
- struct bb_rename_info *ri = rename_info + i;
+ class bb_rename_info *ri = rename_info + i;
ri->bb = bb;
if (bb_mask != NULL && !bitmap_bit_p (bb_mask, bb->index))
bb->aux = NULL;
@@ -725,13 +725,13 @@ regrename_analyze (bitmap bb_mask)
for (i = 0; i < n_bbs; i++)
{
basic_block bb1 = BASIC_BLOCK_FOR_FN (cfun, inverse_postorder[i]);
- struct bb_rename_info *this_info;
+ class bb_rename_info *this_info;
bool success;
edge e;
edge_iterator ei;
int old_length = id_to_chain.length ();
- this_info = (struct bb_rename_info *) bb1->aux;
+ this_info = (class bb_rename_info *) bb1->aux;
if (this_info == NULL)
continue;
@@ -771,15 +771,15 @@ regrename_analyze (bitmap bb_mask)
will be used to pre-open chains when processing the successors. */
FOR_EACH_EDGE (e, ei, bb1->succs)
{
- struct bb_rename_info *dest_ri;
- struct du_head *chain;
+ class bb_rename_info *dest_ri;
+ class du_head *chain;
if (dump_file)
fprintf (dump_file, "successor block %d\n", e->dest->index);
if (e->flags & (EDGE_EH | EDGE_ABNORMAL))
continue;
- dest_ri = (struct bb_rename_info *)e->dest->aux;
+ dest_ri = (class bb_rename_info *)e->dest->aux;
if (dest_ri == NULL)
continue;
for (chain = open_chains; chain; chain = chain->next_chain)
@@ -808,7 +808,7 @@ regrename_analyze (bitmap bb_mask)
edges). */
FOR_EACH_BB_FN (bb, cfun)
{
- struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
+ class bb_rename_info *bb_ri = (class bb_rename_info *) bb->aux;
unsigned j;
bitmap_iterator bi;
@@ -822,12 +822,12 @@ regrename_analyze (bitmap bb_mask)
{
edge e;
edge_iterator ei;
- struct du_head *chain = regrename_chain_from_id (j);
+ class du_head *chain = regrename_chain_from_id (j);
int n_preds_used = 0, n_preds_joined = 0;
FOR_EACH_EDGE (e, ei, bb->preds)
{
- struct bb_rename_info *src_ri;
+ class bb_rename_info *src_ri;
unsigned k;
bitmap_iterator bi2;
HARD_REG_SET live;
@@ -842,14 +842,14 @@ regrename_analyze (bitmap bb_mask)
if (e->flags & (EDGE_EH | EDGE_ABNORMAL))
continue;
- src_ri = (struct bb_rename_info *)e->src->aux;
+ src_ri = (class bb_rename_info *)e->src->aux;
if (src_ri == NULL)
continue;
EXECUTE_IF_SET_IN_BITMAP (&src_ri->open_chains_set,
0, k, bi2)
{
- struct du_head *outgoing_chain = regrename_chain_from_id (k);
+ class du_head *outgoing_chain = regrename_chain_from_id (k);
if (outgoing_chain->regno == chain->regno
&& outgoing_chain->nregs == chain->nregs)
@@ -873,7 +873,7 @@ regrename_analyze (bitmap bb_mask)
}
FOR_EACH_BB_FN (bb, cfun)
{
- struct bb_rename_info *bb_ri = (struct bb_rename_info *) bb->aux;
+ class bb_rename_info *bb_ri = (class bb_rename_info *) bb->aux;
unsigned j;
bitmap_iterator bi;
@@ -887,13 +887,13 @@ regrename_analyze (bitmap bb_mask)
{
edge e;
edge_iterator ei;
- struct du_head *chain = regrename_chain_from_id (j);
+ class du_head *chain = regrename_chain_from_id (j);
int n_succs_used = 0, n_succs_joined = 0;
FOR_EACH_EDGE (e, ei, bb->succs)
{
bool printed = false;
- struct bb_rename_info *dest_ri;
+ class bb_rename_info *dest_ri;
unsigned k;
bitmap_iterator bi2;
HARD_REG_SET live;
@@ -905,14 +905,14 @@ regrename_analyze (bitmap bb_mask)
n_succs_used++;
- dest_ri = (struct bb_rename_info *)e->dest->aux;
+ dest_ri = (class bb_rename_info *)e->dest->aux;
if (dest_ri == NULL)
continue;
EXECUTE_IF_SET_IN_BITMAP (&dest_ri->incoming_open_chains_set,
0, k, bi2)
{
- struct du_head *incoming_chain = regrename_chain_from_id (k);
+ class du_head *incoming_chain = regrename_chain_from_id (k);
if (incoming_chain->regno == chain->regno
&& incoming_chain->nregs == chain->nregs)
@@ -959,7 +959,7 @@ regrename_analyze (bitmap bb_mask)
numbering in its subpatterns. */
bool
-regrename_do_replace (struct du_head *head, int reg)
+regrename_do_replace (class du_head *head, int reg)
{
struct du_chain *chain;
unsigned int base_regno = head->regno;
@@ -969,7 +969,7 @@ regrename_do_replace (struct du_head *head, int reg)
for (chain = head->first; chain; chain = chain->next_use)
{
unsigned int regno = ORIGINAL_REGNO (*chain->loc);
- struct reg_attrs *attr = REG_ATTRS (*chain->loc);
+ class reg_attrs *attr = REG_ATTRS (*chain->loc);
int reg_ptr = REG_POINTER (*chain->loc);
if (DEBUG_INSN_P (chain->insn) && REGNO (*chain->loc) != base_regno)
@@ -1053,7 +1053,7 @@ static void
note_sets_clobbers (rtx x, const_rtx set, void *data)
{
enum rtx_code code = *(enum rtx_code *)data;
- struct du_head *chain;
+ class du_head *chain;
if (GET_CODE (x) == SUBREG)
x = SUBREG_REG (x);
@@ -1070,7 +1070,7 @@ static void
scan_rtx_reg (rtx_insn *insn, rtx *loc, enum reg_class cl, enum scan_actions action,
enum op_type type)
{
- struct du_head **p;
+ class du_head **p;
rtx x = *loc;
unsigned this_regno = REGNO (x);
int this_nregs = REG_NREGS (x);
@@ -1116,8 +1116,8 @@ scan_rtx_reg (rtx_insn *insn, rtx *loc, enum reg_class cl, enum scan_actions act
for (p = &open_chains; *p;)
{
- struct du_head *head = *p;
- struct du_head *next = head->next_chain;
+ class du_head *head = *p;
+ class du_head *next = head->next_chain;
int exact_match = (head->regno == this_regno
&& head->nregs == this_nregs);
int superset = (this_regno <= head->regno
@@ -1588,7 +1588,7 @@ record_out_operands (rtx_insn *insn, bool earlyclobber, insn_rr_info *insn_info)
rtx op = *loc;
enum reg_class cl = alternative_class (op_alt, opn);
- struct du_head *prev_open;
+ class du_head *prev_open;
if (recog_data.operand_type[opn] != OP_OUT
|| op_alt[opn].earlyclobber != earlyclobber)
@@ -1835,7 +1835,7 @@ build_def_use (basic_block bb)
requires a caller-saved reg. */
if (CALL_P (insn))
{
- struct du_head *p;
+ class du_head *p;
for (p = open_chains; p; p = p->next_chain)
p->need_caller_save_reg = 1;
}
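The same rule follows the type name into macro arguments and casts: the allocation macros used above expand their type argument inside sizeof and a cast, both of which accept an elaborated-type-specifier. A small sketch of that idea, using a stand-in macro rather than the real libiberty one and an invented type name:

#include <cstdlib>

/* Stand-in for an XOBNEW-style macro: the type argument lands inside
   sizeof and a pointer cast.  */
#define ALLOC_ONE(T) ((T *) std::calloc (1, sizeof (T)))

class du_head_like
{
public:
  du_head_like *next_chain;
  unsigned regno;
};

int
main ()
{
  class du_head_like *head = ALLOC_ONE (class du_head_like);
  void *erased = head;
  /* Casting back from a type-erased pointer (as done with bb->aux above)
     also takes the elaborated form.  */
  class du_head_like *again = (class du_head_like *) erased;
  again->regno = 0;
  std::free (again);
  return 0;
}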
diff --git a/gcc/regrename.h b/gcc/regrename.h
index e9f28236cde..2fe12d5aa61 100644
--- a/gcc/regrename.h
+++ b/gcc/regrename.h
@@ -26,11 +26,11 @@ class du_head
{
public:
/* The next chain. */
- struct du_head *next_chain;
+ class du_head *next_chain;
/* The first and last elements of this chain. */
struct du_chain *first, *last;
/* The chain that this chain is tied to. */
- struct du_head *tied_chain;
+ class du_head *tied_chain;
/* Describes the register being tracked. */
unsigned regno;
int nregs;
@@ -56,7 +56,7 @@ public:
unsigned int target_data_2;
};
-typedef struct du_head *du_head_p;
+typedef class du_head *du_head_p;
/* This struct describes a single occurrence of a register. */
struct du_chain
@@ -82,7 +82,7 @@ struct operand_rr_info
/* Holds either the chain for the operand itself, or for the registers in
a memory operand. */
struct du_chain *chains[MAX_REGS_PER_ADDRESS];
- struct du_head *heads[MAX_REGS_PER_ADDRESS];
+ class du_head *heads[MAX_REGS_PER_ADDRESS];
};
/* A struct to hold a vector of operand_rr_info structures describing the
diff --git a/gcc/reload.h b/gcc/reload.h
index edfeebfff0e..eb497712498 100644
--- a/gcc/reload.h
+++ b/gcc/reload.h
@@ -282,11 +282,11 @@ class insn_chain
{
public:
/* Links to the neighbor instructions. */
- struct insn_chain *next, *prev;
+ class insn_chain *next, *prev;
/* Link through a chains set up by calculate_needs_all_insns, containing
all insns that need reloading. */
- struct insn_chain *next_need_reload;
+ class insn_chain *next_need_reload;
/* The rtx of the insn. */
rtx_insn *insn;
@@ -320,10 +320,10 @@ public:
/* A chain of insn_chain structures to describe all non-note insns in
a function. */
-extern struct insn_chain *reload_insn_chain;
+extern class insn_chain *reload_insn_chain;
/* Allocate a new insn_chain structure. */
-extern struct insn_chain *new_insn_chain (void);
+extern class insn_chain *new_insn_chain (void);
#endif
#if defined SET_HARD_REG_BIT
diff --git a/gcc/reload1.c b/gcc/reload1.c
index 3ad6f1d9c1f..38ee356a791 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -243,14 +243,14 @@ static char *reload_insn_firstobj;
/* List of insn_chain instructions, one for every insn that reload needs to
examine. */
-struct insn_chain *reload_insn_chain;
+class insn_chain *reload_insn_chain;
/* TRUE if we potentially left dead insns in the insn stream and want to
run DCE immediately after reload, FALSE otherwise. */
static bool need_dce;
/* List of all insns needing reloads. */
-static struct insn_chain *insns_need_reload;
+static class insn_chain *insns_need_reload;
/* This structure is used to record information about register eliminations.
Each array entry describes one possible way of eliminating a register
@@ -336,10 +336,10 @@ static int num_labels;
static void replace_pseudos_in (rtx *, machine_mode, rtx);
static void maybe_fix_stack_asms (void);
-static void copy_reloads (struct insn_chain *);
+static void copy_reloads (class insn_chain *);
static void calculate_needs_all_insns (int);
-static int find_reg (struct insn_chain *, int);
-static void find_reload_regs (struct insn_chain *);
+static int find_reg (class insn_chain *, int);
+static void find_reload_regs (class insn_chain *);
static void select_reload_regs (void);
static void delete_caller_save_insns (void);
@@ -368,7 +368,7 @@ static void spill_hard_reg (unsigned int, int);
static int finish_spills (int);
static void scan_paradoxical_subregs (rtx);
static void count_pseudo (int);
-static void order_regs_for_reload (struct insn_chain *);
+static void order_regs_for_reload (class insn_chain *);
static void reload_as_needed (int);
static void forget_old_reloads_1 (rtx, const_rtx, void *);
static void forget_marked_reloads (regset);
@@ -382,19 +382,19 @@ static int reload_reg_free_for_value_p (int, int, int, enum reload_type,
rtx, rtx, int, int);
static int free_for_value_p (int, machine_mode, int, enum reload_type,
rtx, rtx, int, int);
-static int allocate_reload_reg (struct insn_chain *, int, int);
+static int allocate_reload_reg (class insn_chain *, int, int);
static int conflicts_with_override (rtx);
static void failed_reload (rtx_insn *, int);
static int set_reload_reg (int, int);
-static void choose_reload_regs_init (struct insn_chain *, rtx *);
-static void choose_reload_regs (struct insn_chain *);
-static void emit_input_reload_insns (struct insn_chain *, struct reload *,
+static void choose_reload_regs_init (class insn_chain *, rtx *);
+static void choose_reload_regs (class insn_chain *);
+static void emit_input_reload_insns (class insn_chain *, struct reload *,
rtx, int);
-static void emit_output_reload_insns (struct insn_chain *, struct reload *,
+static void emit_output_reload_insns (class insn_chain *, struct reload *,
int);
-static void do_input_reload (struct insn_chain *, struct reload *, int);
-static void do_output_reload (struct insn_chain *, struct reload *, int);
-static void emit_reload_insns (struct insn_chain *);
+static void do_input_reload (class insn_chain *, struct reload *, int);
+static void do_output_reload (class insn_chain *, struct reload *, int);
+static void emit_reload_insns (class insn_chain *);
static void delete_output_reload (rtx_insn *, int, int, rtx);
static void delete_address_reloads (rtx_insn *, rtx_insn *);
static void delete_address_reloads_1 (rtx_insn *, rtx, rtx_insn *);
@@ -467,17 +467,17 @@ init_reload (void)
}
/* List of insn chains that are currently unused. */
-static struct insn_chain *unused_insn_chains = 0;
+static class insn_chain *unused_insn_chains = 0;
/* Allocate an empty insn_chain structure. */
-struct insn_chain *
+class insn_chain *
new_insn_chain (void)
{
- struct insn_chain *c;
+ class insn_chain *c;
if (unused_insn_chains == 0)
{
- c = XOBNEW (&reload_obstack, struct insn_chain);
+ c = XOBNEW (&reload_obstack, class insn_chain);
INIT_REG_SET (&c->live_throughout);
INIT_REG_SET (&c->dead_or_set);
}
@@ -1315,7 +1315,7 @@ maybe_fix_stack_asms (void)
#ifdef STACK_REGS
const char *constraints[MAX_RECOG_OPERANDS];
machine_mode operand_mode[MAX_RECOG_OPERANDS];
- struct insn_chain *chain;
+ class insn_chain *chain;
for (chain = reload_insn_chain; chain != 0; chain = chain->next)
{
@@ -1414,7 +1414,7 @@ maybe_fix_stack_asms (void)
/* Copy the global variables n_reloads and rld into the corresponding elts
of CHAIN. */
static void
-copy_reloads (struct insn_chain *chain)
+copy_reloads (class insn_chain *chain)
{
chain->n_reloads = n_reloads;
chain->rld = XOBNEWVEC (&reload_obstack, struct reload, n_reloads);
@@ -1428,8 +1428,8 @@ copy_reloads (struct insn_chain *chain)
static void
calculate_needs_all_insns (int global)
{
- struct insn_chain **pprev_reload = &insns_need_reload;
- struct insn_chain *chain, *next = 0;
+ class insn_chain **pprev_reload = &insns_need_reload;
+ class insn_chain *chain, *next = 0;
something_needs_elimination = 0;
@@ -1725,7 +1725,7 @@ count_pseudo (int reg)
contents of BAD_SPILL_REGS for the insn described by CHAIN. */
static void
-order_regs_for_reload (struct insn_chain *chain)
+order_regs_for_reload (class insn_chain *chain)
{
unsigned i;
HARD_REG_SET used_by_pseudos;
@@ -1809,7 +1809,7 @@ count_spilled_pseudo (int spilled, int spilled_nregs, int reg)
/* Find reload register to use for reload number ORDER. */
static int
-find_reg (struct insn_chain *chain, int order)
+find_reg (class insn_chain *chain, int order)
{
int rnum = reload_order[order];
struct reload *rl = rld + rnum;
@@ -1954,7 +1954,7 @@ find_reg (struct insn_chain *chain, int order)
for a smaller class even though it belongs to that class. */
static void
-find_reload_regs (struct insn_chain *chain)
+find_reload_regs (class insn_chain *chain)
{
int i;
@@ -2016,7 +2016,7 @@ find_reload_regs (struct insn_chain *chain)
static void
select_reload_regs (void)
{
- struct insn_chain *chain;
+ class insn_chain *chain;
/* Try to satisfy the needs for each insn. */
for (chain = insns_need_reload; chain != 0;
@@ -2029,13 +2029,13 @@ select_reload_regs (void)
static void
delete_caller_save_insns (void)
{
- struct insn_chain *c = reload_insn_chain;
+ class insn_chain *c = reload_insn_chain;
while (c != 0)
{
while (c != 0 && c->is_caller_save_insn)
{
- struct insn_chain *next = c->next;
+ class insn_chain *next = c->next;
rtx_insn *insn = c->insn;
if (c == reload_insn_chain)
@@ -4194,7 +4194,7 @@ spill_hard_reg (unsigned int regno, int cant_eliminate)
static int
finish_spills (int global)
{
- struct insn_chain *chain;
+ class insn_chain *chain;
int something_changed = 0;
unsigned i;
reg_set_iterator rsi;
@@ -4459,7 +4459,7 @@ fixup_eh_region_note (rtx_insn *insn, rtx_insn *prev, rtx_insn *next)
static void
reload_as_needed (int live_known)
{
- struct insn_chain *chain;
+ class insn_chain *chain;
#if AUTO_INC_DEC
int i;
#endif
@@ -6092,7 +6092,7 @@ set_reload_reg (int i, int r)
we didn't change anything. */
static int
-allocate_reload_reg (struct insn_chain *chain ATTRIBUTE_UNUSED, int r,
+allocate_reload_reg (class insn_chain *chain ATTRIBUTE_UNUSED, int r,
int last_reload)
{
int i, pass, count;
@@ -6223,7 +6223,7 @@ allocate_reload_reg (struct insn_chain *chain ATTRIBUTE_UNUSED, int r,
is the array we use to restore the reg_rtx field for every reload. */
static void
-choose_reload_regs_init (struct insn_chain *chain, rtx *save_reload_reg_rtx)
+choose_reload_regs_init (class insn_chain *chain, rtx *save_reload_reg_rtx)
{
int i;
@@ -6324,7 +6324,7 @@ compute_reload_subreg_offset (machine_mode outermode,
finding a reload reg in the proper class. */
static void
-choose_reload_regs (struct insn_chain *chain)
+choose_reload_regs (class insn_chain *chain)
{
rtx_insn *insn = chain->insn;
int i, j;
@@ -7113,7 +7113,7 @@ reload_adjust_reg_for_icode (rtx *reload_reg, rtx alt_reload_reg,
has the number J. OLD contains the value to be used as input. */
static void
-emit_input_reload_insns (struct insn_chain *chain, struct reload *rl,
+emit_input_reload_insns (class insn_chain *chain, struct reload *rl,
rtx old, int j)
{
rtx_insn *insn = chain->insn;
@@ -7573,7 +7573,7 @@ emit_input_reload_insns (struct insn_chain *chain, struct reload *rl,
/* Generate insns to for the output reload RL, which is for the insn described
by CHAIN and has the number J. */
static void
-emit_output_reload_insns (struct insn_chain *chain, struct reload *rl,
+emit_output_reload_insns (class insn_chain *chain, struct reload *rl,
int j)
{
rtx reloadreg;
@@ -7779,7 +7779,7 @@ emit_output_reload_insns (struct insn_chain *chain, struct reload *rl,
/* Do input reloading for reload RL, which is for the insn described by CHAIN
and has the number J. */
static void
-do_input_reload (struct insn_chain *chain, struct reload *rl, int j)
+do_input_reload (class insn_chain *chain, struct reload *rl, int j)
{
rtx_insn *insn = chain->insn;
rtx old = (rl->in && MEM_P (rl->in)
@@ -7880,7 +7880,7 @@ do_input_reload (struct insn_chain *chain, struct reload *rl, int j)
??? At some point we need to support handling output reloads of
JUMP_INSNs or insns that set cc0. */
static void
-do_output_reload (struct insn_chain *chain, struct reload *rl, int j)
+do_output_reload (class insn_chain *chain, struct reload *rl, int j)
{
rtx note, old;
rtx_insn *insn = chain->insn;
@@ -7986,7 +7986,7 @@ inherit_piecemeal_p (int dest ATTRIBUTE_UNUSED,
/* Output insns to reload values in and out of the chosen reload regs. */
static void
-emit_reload_insns (struct insn_chain *chain)
+emit_reload_insns (class insn_chain *chain)
{
rtx_insn *insn = chain->insn;
diff --git a/gcc/rtl.h b/gcc/rtl.h
index ad2f3cd5de6..039ab05f951 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -210,7 +210,7 @@ union rtunion
tree rt_tree;
basic_block rt_bb;
mem_attrs *rt_mem;
- struct constant_descriptor_rtx *rt_constant;
+ class constant_descriptor_rtx *rt_constant;
struct dw_cfi_node *rt_cfi;
};
@@ -3726,7 +3726,7 @@ struct GTY(()) target_rtl {
rtx x_static_reg_base_value[FIRST_PSEUDO_REGISTER];
/* The default memory attributes for each mode. */
- struct mem_attrs *x_mode_mem_attrs[(int) MAX_MACHINE_MODE];
+ class mem_attrs *x_mode_mem_attrs[(int) MAX_MACHINE_MODE];
/* Track if RTL has been initialized. */
bool target_specific_initialized;
@@ -3760,10 +3760,10 @@ extern struct target_rtl *this_target_rtl;
#ifndef GENERATOR_FILE
/* Return the attributes of a MEM rtx. */
-static inline const struct mem_attrs *
+static inline const class mem_attrs *
get_mem_attrs (const_rtx x)
{
- struct mem_attrs *attrs;
+ class mem_attrs *attrs;
attrs = MEM_ATTRS (x);
if (!attrs)
diff --git a/gcc/sanopt.c b/gcc/sanopt.c
index bf9fdc9cb83..00ade872832 100644
--- a/gcc/sanopt.c
+++ b/gcc/sanopt.c
@@ -354,7 +354,7 @@ maybe_get_dominating_check (auto_vec<gimple *> &v)
/* Optimize away redundant UBSAN_NULL calls. */
static bool
-maybe_optimize_ubsan_null_ifn (struct sanopt_ctx *ctx, gimple *stmt)
+maybe_optimize_ubsan_null_ifn (class sanopt_ctx *ctx, gimple *stmt)
{
gcc_assert (gimple_call_num_args (stmt) == 3);
tree ptr = gimple_call_arg (stmt, 0);
@@ -591,7 +591,7 @@ maybe_optimize_ubsan_ptr_ifn (sanopt_ctx *ctx, gimple *stmt)
when we can actually optimize. */
static bool
-maybe_optimize_ubsan_vptr_ifn (struct sanopt_ctx *ctx, gimple *stmt)
+maybe_optimize_ubsan_vptr_ifn (class sanopt_ctx *ctx, gimple *stmt)
{
gcc_assert (gimple_call_num_args (stmt) == 5);
sanopt_tree_triplet triplet;
@@ -695,7 +695,7 @@ can_remove_asan_check (auto_vec<gimple *> &v, tree len, basic_block bb)
/* Optimize away redundant ASAN_CHECK calls. */
static bool
-maybe_optimize_asan_check_ifn (struct sanopt_ctx *ctx, gimple *stmt)
+maybe_optimize_asan_check_ifn (class sanopt_ctx *ctx, gimple *stmt)
{
gcc_assert (gimple_call_num_args (stmt) == 4);
tree ptr = gimple_call_arg (stmt, 1);
@@ -768,7 +768,7 @@ maybe_optimize_asan_check_ifn (struct sanopt_ctx *ctx, gimple *stmt)
anything anymore. CTX is a sanopt context. */
static void
-sanopt_optimize_walker (basic_block bb, struct sanopt_ctx *ctx)
+sanopt_optimize_walker (basic_block bb, class sanopt_ctx *ctx)
{
basic_block son;
gimple_stmt_iterator gsi;
@@ -887,7 +887,7 @@ sanopt_optimize_walker (basic_block bb, struct sanopt_ctx *ctx)
static int
sanopt_optimize (function *fun, bool *contains_asan_mark)
{
- struct sanopt_ctx ctx;
+ class sanopt_ctx ctx;
ctx.asan_num_accesses = 0;
ctx.contains_asan_mark = false;
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 28b9d38ab3f..5cb4a462ce9 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -475,16 +475,16 @@ static int deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
enum reg_note, bool);
-static void add_dependence_list_and_free (struct deps_desc *, rtx_insn *,
+static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
rtx_insn_list **, int, enum reg_note,
bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);
-static void flush_pending_lists (struct deps_desc *, rtx_insn *, int, int);
-static void sched_analyze_1 (struct deps_desc *, rtx, rtx_insn *);
-static void sched_analyze_2 (struct deps_desc *, rtx, rtx_insn *);
-static void sched_analyze_insn (struct deps_desc *, rtx, rtx_insn *);
+static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
+static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
+static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);
static bool sched_has_condition_p (const rtx_insn *);
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
@@ -1574,7 +1574,7 @@ add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
newly created dependencies. */
static void
-add_dependence_list_and_free (struct deps_desc *deps, rtx_insn *insn,
+add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
rtx_insn_list **listp,
int uncond, enum reg_note dep_type, bool hard)
{
@@ -1708,7 +1708,7 @@ chain_to_prev_insn (rtx_insn *insn)
so that we can do memory aliasing on it. */
static void
-add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
+add_insn_mem_dependence (class deps_desc *deps, bool read_p,
rtx_insn *insn, rtx mem)
{
rtx_insn_list **insn_list;
@@ -1749,7 +1749,7 @@ add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
dependencies for a read operation, similarly with FOR_WRITE. */
static void
-flush_pending_lists (struct deps_desc *deps, rtx_insn *insn, int for_read,
+flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
int for_write)
{
if (for_write)
@@ -1953,7 +1953,7 @@ create_insn_reg_set (int regno, rtx insn)
/* Set up insn register uses for INSN and dependency context DEPS. */
static void
-setup_insn_reg_uses (struct deps_desc *deps, rtx_insn *insn)
+setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
unsigned i;
reg_set_iterator rsi;
@@ -2245,7 +2245,7 @@ static bool can_start_lhs_rhs_p;
/* Extend reg info for the deps context DEPS given that
we have just generated a register numbered REGNO. */
static void
-extend_deps_reg_info (struct deps_desc *deps, int regno)
+extend_deps_reg_info (class deps_desc *deps, int regno)
{
int max_regno = regno + 1;
@@ -2294,7 +2294,7 @@ maybe_extend_reg_info_p (void)
CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE. */
static void
-sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
+sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
enum rtx_code ref, rtx_insn *insn)
{
/* We could emit new pseudos in renaming. Extend the reg structures. */
@@ -2382,7 +2382,7 @@ sched_analyze_reg (struct deps_desc *deps, int regno, machine_mode mode,
destination of X, and reads of everything mentioned. */
static void
-sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
rtx dest = XEXP (x, 0);
enum rtx_code code = GET_CODE (x);
@@ -2556,7 +2556,7 @@ sched_analyze_1 (struct deps_desc *deps, rtx x, rtx_insn *insn)
/* Analyze the uses of memory and registers in rtx X in INSN. */
static void
-sched_analyze_2 (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
int i;
int j;
@@ -2890,7 +2890,7 @@ get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
/* Analyze an INSN with pattern X to find all dependencies. */
static void
-sched_analyze_insn (struct deps_desc *deps, rtx x, rtx_insn *insn)
+sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
{
RTX_CODE code = GET_CODE (x);
rtx link;
@@ -3648,7 +3648,7 @@ chain_to_prev_insn_p (rtx_insn *insn)
/* Analyze INSN with DEPS as a context. */
void
-deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
+deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
{
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
@@ -3815,7 +3815,7 @@ deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
/* Initialize DEPS for the new block beginning with HEAD. */
void
-deps_start_bb (struct deps_desc *deps, rtx_insn *head)
+deps_start_bb (class deps_desc *deps, rtx_insn *head)
{
gcc_assert (!deps->readonly);
@@ -3834,7 +3834,7 @@ deps_start_bb (struct deps_desc *deps, rtx_insn *head)
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
dependencies for each insn. */
void
-sched_analyze (struct deps_desc *deps, rtx_insn *head, rtx_insn *tail)
+sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
rtx_insn *insn;
@@ -3928,10 +3928,10 @@ sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
/* Initialize variables for region data dependence analysis.
When LAZY_REG_LAST is true, do not allocate reg_last array
- of struct deps_desc immediately. */
+ of class deps_desc immediately. */
void
-init_deps (struct deps_desc *deps, bool lazy_reg_last)
+init_deps (class deps_desc *deps, bool lazy_reg_last)
{
int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
@@ -3968,7 +3968,7 @@ init_deps (struct deps_desc *deps, bool lazy_reg_last)
/* Init only reg_last field of DEPS, which was not allocated before as
we inited DEPS lazily. */
void
-init_deps_reg_last (struct deps_desc *deps)
+init_deps_reg_last (class deps_desc *deps)
{
gcc_assert (deps && deps->max_reg > 0);
gcc_assert (deps->reg_last == NULL);
@@ -3980,7 +3980,7 @@ init_deps_reg_last (struct deps_desc *deps)
/* Free insn lists found in DEPS. */
void
-free_deps (struct deps_desc *deps)
+free_deps (class deps_desc *deps)
{
unsigned i;
reg_set_iterator rsi;
@@ -4028,7 +4028,7 @@ free_deps (struct deps_desc *deps)
/* Remove INSN from dependence contexts DEPS. */
void
-remove_from_deps (struct deps_desc *deps, rtx_insn *insn)
+remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
int removed;
unsigned i;
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 4875eef96a8..a594b49ec66 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -472,7 +472,7 @@ basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
basic_block first_bb, target_bb;
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
bool success;
/* Blah. We should fix the rest of the code not to get confused by
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 5fef221a0ea..fca1bcfe7f0 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -571,7 +571,7 @@ public:
BOOL_BITFIELD readonly : 1;
};
-typedef struct deps_desc *deps_t;
+typedef class deps_desc *deps_t;
/* This structure holds some state of the current scheduling pass, and
contains some function pointers that abstract out some of the non-generic
@@ -1347,14 +1347,14 @@ extern bool sched_insns_conditions_mutex_p (const rtx_insn *,
const rtx_insn *);
extern bool sched_insn_is_legitimate_for_speculation_p (const rtx_insn *, ds_t);
extern void add_dependence (rtx_insn *, rtx_insn *, enum reg_note);
-extern void sched_analyze (struct deps_desc *, rtx_insn *, rtx_insn *);
-extern void init_deps (struct deps_desc *, bool);
-extern void init_deps_reg_last (struct deps_desc *);
-extern void free_deps (struct deps_desc *);
+extern void sched_analyze (class deps_desc *, rtx_insn *, rtx_insn *);
+extern void init_deps (class deps_desc *, bool);
+extern void init_deps_reg_last (class deps_desc *);
+extern void free_deps (class deps_desc *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
-extern void deps_analyze_insn (struct deps_desc *, rtx_insn *);
-extern void remove_from_deps (struct deps_desc *, rtx_insn *);
+extern void deps_analyze_insn (class deps_desc *, rtx_insn *);
+extern void remove_from_deps (class deps_desc *, rtx_insn *);
extern void init_insn_reg_pressure_info (rtx_insn *);
extern void get_implicit_reg_pending_clobbers (HARD_REG_SET *, rtx_insn *);
@@ -1377,7 +1377,7 @@ extern void haifa_note_reg_use (int);
extern void maybe_extend_reg_info_p (void);
-extern void deps_start_bb (struct deps_desc *, rtx_insn *);
+extern void deps_start_bb (class deps_desc *, rtx_insn *);
extern enum reg_note ds_to_dt (ds_t);
extern bool deps_pools_are_empty_p (void);
@@ -1509,7 +1509,7 @@ extern void dump_rgn_dependencies_dot (const char *);
extern void free_rgn_deps (void);
extern int contributes_to_priority (rtx_insn *, rtx_insn *);
extern void extend_rgns (int *, int *, sbitmap, int *);
-extern void deps_join (struct deps_desc *, struct deps_desc *);
+extern void deps_join (class deps_desc *, class deps_desc *);
extern void rgn_setup_common_sched_info (void);
extern void rgn_setup_sched_infos (void);
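A typedef that introduces a pointer alias, like deps_t above, contains the same kind of elaborated-type-specifier, so its key is adjusted the same way. A brief sketch with invented names:

class dep_context
{
public:
  int max_reg;
};

/* Either key would name the same type here; 'class' keeps the alias
   consistent with the definition above.  */
typedef class dep_context *dep_context_p;

static inline int
max_reg_of (dep_context_p dc)
{
  return dc->max_reg;
}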
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index d4001300a4b..59ee6a0a57c 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -248,7 +248,7 @@ static void compute_block_dependences (int);
static void schedule_region (int);
static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *,
rtx_insn_list **, rtx_expr_list **);
-static void propagate_deps (int, struct deps_desc *);
+static void propagate_deps (int, class deps_desc *);
static void free_pending_lists (void);
/* Functions for construction of the control flow graph. */
@@ -2583,7 +2583,7 @@ add_branch_dependences (rtx_insn *head, rtx_insn *tail)
the variables of its predecessors. When the analysis for a bb completes,
we save the contents to the corresponding bb_deps[bb] variable. */
-static struct deps_desc *bb_deps;
+static class deps_desc *bb_deps;
static void
concat_insn_mem_list (rtx_insn_list *copy_insns,
@@ -2608,7 +2608,7 @@ concat_insn_mem_list (rtx_insn_list *copy_insns,
/* Join PRED_DEPS to the SUCC_DEPS. */
void
-deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
+deps_join (class deps_desc *succ_deps, class deps_desc *pred_deps)
{
unsigned reg;
reg_set_iterator rsi;
@@ -2670,7 +2670,7 @@ deps_join (struct deps_desc *succ_deps, struct deps_desc *pred_deps)
/* After computing the dependencies for block BB, propagate the dependencies
found in TMP_DEPS to the successors of the block. */
static void
-propagate_deps (int bb, struct deps_desc *pred_deps)
+propagate_deps (int bb, class deps_desc *pred_deps)
{
basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb));
edge_iterator ei;
@@ -2727,7 +2727,7 @@ static void
compute_block_dependences (int bb)
{
rtx_insn *head, *tail;
- struct deps_desc tmp_deps;
+ class deps_desc tmp_deps;
tmp_deps = bb_deps[bb];
@@ -3351,7 +3351,7 @@ sched_rgn_compute_dependencies (int rgn)
init_deps_global ();
/* Initializations for region data dependence analysis. */
- bb_deps = XNEWVEC (struct deps_desc, current_nr_blocks);
+ bb_deps = XNEWVEC (class deps_desc, current_nr_blocks);
for (bb = 0; bb < current_nr_blocks; bb++)
init_deps (bb_deps + bb, false);
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 6dec1beaa04..bb8016bb530 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -61,7 +61,7 @@ struct succs_info current_succs;
static struct common_sched_info_def sel_common_sched_info;
/* The loop nest being pipelined. */
-struct loop *current_loop_nest;
+class loop *current_loop_nest;
/* LOOP_NESTS is a vector containing the corresponding loop nest for
each region. */
@@ -424,7 +424,7 @@ reset_target_context (tc_t tc, bool clean_p)
}
/* Functions to work with dependence contexts.
- Dc (aka deps context, aka deps_t, aka struct deps_desc *) is short for dependence
+ Dc (aka deps context, aka deps_t, aka class deps_desc *) is short for dependence
context. It accumulates information about processed insns to decide if
current insn is dependent on the processed ones. */
@@ -440,7 +440,7 @@ copy_deps_context (deps_t to, deps_t from)
static deps_t
alloc_deps_context (void)
{
- return XNEW (struct deps_desc);
+ return XNEW (class deps_desc);
}
/* Allocate and initialize dep context. */
@@ -2749,7 +2749,7 @@ init_id_from_df (idata_t id, insn_t insn, bool force_unique_p)
static void
deps_init_id (idata_t id, insn_t insn, bool force_unique_p)
{
- struct deps_desc _dc, *dc = &_dc;
+ class deps_desc _dc, *dc = &_dc;
deps_init_id_data.where = DEPS_IN_NOWHERE;
deps_init_id_data.id = id;
@@ -3390,7 +3390,7 @@ has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp)
{
int i;
ds_t ds;
- struct deps_desc *dc;
+ class deps_desc *dc;
if (INSN_SIMPLEJUMP_P (pred))
/* Unconditional jump is just a transfer of control flow.
@@ -5397,7 +5397,7 @@ change_loops_latches (basic_block from, basic_block to)
if (current_loop_nest)
{
- struct loop *loop;
+ class loop *loop;
for (loop = current_loop_nest; loop; loop = loop_outer (loop))
if (considered_for_pipelining_p (loop) && loop->latch == from)
@@ -6002,11 +6002,11 @@ bb_top_order_comparator (const void *x, const void *y)
/* Create a region for LOOP and return its number. If we don't want
to pipeline LOOP, return -1. */
static int
-make_region_from_loop (struct loop *loop)
+make_region_from_loop (class loop *loop)
{
unsigned int i;
int new_rgn_number = -1;
- struct loop *inner;
+ class loop *inner;
/* Basic block index, to be assigned to BLOCK_TO_BB. */
int bb_ord_index = 0;
@@ -6095,9 +6095,9 @@ make_region_from_loop_preheader (vec<basic_block> *&loop_blocks)
pipelined before outer loops. Returns true when a region for LOOP
is created. */
static bool
-make_regions_from_loop_nest (struct loop *loop)
+make_regions_from_loop_nest (class loop *loop)
{
- struct loop *cur_loop;
+ class loop *cur_loop;
int rgn_number;
/* Traverse all inner nodes of the loop. */
@@ -6133,7 +6133,7 @@ sel_init_pipelining (void)
recompute_rev_top_order ();
}
-/* Returns a struct loop for region RGN. */
+/* Returns a class loop for region RGN. */
loop_p
get_loop_nest_for_rgn (unsigned int rgn)
{
@@ -6147,7 +6147,7 @@ get_loop_nest_for_rgn (unsigned int rgn)
/* True when LOOP was included into pipelining regions. */
bool
-considered_for_pipelining_p (struct loop *loop)
+considered_for_pipelining_p (class loop *loop)
{
if (loop_depth (loop) == 0)
return false;
@@ -6249,7 +6249,7 @@ make_regions_from_the_rest (void)
/* Free data structures used in pipelining of loops. */
void sel_finish_pipelining (void)
{
- struct loop *loop;
+ class loop *loop;
/* Release aux fields so we don't free them later by mistake. */
FOR_EACH_LOOP (loop, 0)
@@ -6324,7 +6324,7 @@ sel_is_loop_preheader_p (basic_block bb)
{
if (current_loop_nest)
{
- struct loop *outer;
+ class loop *outer;
if (preheader_removed)
return false;
diff --git a/gcc/sel-sched-ir.h b/gcc/sel-sched-ir.h
index 2a24a920991..b5824aec369 100644
--- a/gcc/sel-sched-ir.h
+++ b/gcc/sel-sched-ir.h
@@ -746,7 +746,7 @@ public:
htab_t transformed_insns;
/* A context incapsulating this insn. */
- struct deps_desc deps_context;
+ class deps_desc deps_context;
/* This field is initialized at the beginning of scheduling and is used
to handle sched group instructions. If it is non-null, then it points
@@ -775,7 +775,7 @@ public:
BOOL_BITFIELD after_stall_p : 1;
};
-typedef struct _sel_insn_data sel_insn_data_def;
+typedef class _sel_insn_data sel_insn_data_def;
typedef sel_insn_data_def *sel_insn_data_t;
extern vec<sel_insn_data_def> s_i_d;
@@ -954,7 +954,7 @@ extern vec<sel_region_bb_info_def> sel_region_bb_info;
extern bitmap_head *forced_ebb_heads;
/* The loop nest being pipelined. */
-extern struct loop *current_loop_nest;
+extern class loop *current_loop_nest;
/* Saves pipelined blocks. Bitmap is indexed by bb->index. */
extern sbitmap bbs_pipelined;
@@ -1043,7 +1043,7 @@ extern bool in_current_region_p (basic_block);
static inline bool
inner_loop_header_p (basic_block bb)
{
- struct loop *inner_loop;
+ class loop *inner_loop;
if (!current_loop_nest)
return false;
@@ -1069,7 +1069,7 @@ inner_loop_header_p (basic_block bb)
/* Return exit edges of LOOP, filtering out edges with the same dest bb. */
static inline vec<edge>
-get_loop_exit_edges_unique_dests (const struct loop *loop)
+get_loop_exit_edges_unique_dests (const class loop *loop)
{
vec<edge> edges = vNULL;
struct loop_exit *exit;
@@ -1142,8 +1142,8 @@ get_all_loop_exits (basic_block bb)
/* And now check whether we should skip over inner loop. */
if (inner_loop_header_p (bb))
{
- struct loop *this_loop;
- struct loop *pred_loop = NULL;
+ class loop *this_loop;
+ class loop *pred_loop = NULL;
int i;
unsigned this_depth;
edge e;
@@ -1642,7 +1642,7 @@ extern void sel_init_pipelining (void);
extern void sel_finish_pipelining (void);
extern void sel_sched_region (int);
extern loop_p get_loop_nest_for_rgn (unsigned int);
-extern bool considered_for_pipelining_p (struct loop *);
+extern bool considered_for_pipelining_p (class loop *);
extern void make_region_from_loop_preheader (vec<basic_block> *&);
extern void sel_add_loop_preheaders (bb_vec_t *);
extern bool sel_is_loop_preheader_p (basic_block);
diff --git a/gcc/selftest.h b/gcc/selftest.h
index 6152ef54782..75b2cd836e1 100644
--- a/gcc/selftest.h
+++ b/gcc/selftest.h
@@ -150,7 +150,7 @@ class auto_fix_quotes
The following struct describes a particular case within our test
matrix. */
-struct line_table_case;
+class line_table_case;
/* A class for overriding the global "line_table" within a selftest,
restoring its value afterwards. At most one instance of this
diff --git a/gcc/sese.c b/gcc/sese.c
index e9047311602..4b3065a2ee0 100644
--- a/gcc/sese.c
+++ b/gcc/sese.c
@@ -149,7 +149,7 @@ sese_build_liveouts (sese_info_p region)
sese_info_p
new_sese_info (edge entry, edge exit)
{
- sese_info_p region = XNEW (struct sese_info_t);
+ sese_info_p region = XNEW (class sese_info_t);
region->region.entry = entry;
region->region.exit = exit;
@@ -217,10 +217,10 @@ sese_insert_phis_for_liveouts (sese_info_p region, basic_block bb,
/* Returns the outermost loop in SCOP that contains BB. */
-struct loop *
+class loop *
outermost_loop_in_sese_1 (sese_l &region, basic_block bb)
{
- struct loop *nest;
+ class loop *nest;
nest = bb->loop_father;
while (loop_outer (nest)
diff --git a/gcc/sese.h b/gcc/sese.h
index 0319da6508f..6a62bb6c9bf 100644
--- a/gcc/sese.h
+++ b/gcc/sese.h
@@ -101,7 +101,7 @@ public:
extern sese_info_p new_sese_info (edge, edge);
extern void free_sese_info (sese_info_p);
extern void sese_insert_phis_for_liveouts (sese_info_p, basic_block, edge, edge);
-extern struct loop *outermost_loop_in_sese (sese_l &, basic_block);
+extern class loop *outermost_loop_in_sese (sese_l &, basic_block);
extern tree scalar_evolution_in_region (const sese_l &, loop_p, tree);
extern bool scev_analyzable_p (tree, sese_l &);
extern bool invariant_in_sese_p_rec (tree, const sese_l &, bool *);
@@ -156,7 +156,7 @@ defined_in_sese_p (tree name, const sese_l &r)
/* Returns true when LOOP is in REGION. */
static inline bool
-loop_in_sese_p (struct loop *loop, const sese_l &region)
+loop_in_sese_p (class loop *loop, const sese_l &region)
{
return (bb_in_sese_p (loop->header, region)
&& bb_in_sese_p (loop->latch, region));
@@ -272,7 +272,7 @@ typedef struct gimple_poly_bb
/* Return the innermost loop that contains the basic block GBB. */
-static inline struct loop *
+static inline class loop *
gbb_loop (gimple_poly_bb_p gbb)
{
return GBB_BB (gbb)->loop_father;
diff --git a/gcc/sreal.c b/gcc/sreal.c
index b991652c04e..5418481758c 100644
--- a/gcc/sreal.c
+++ b/gcc/sreal.c
@@ -258,7 +258,7 @@ sreal::stream_out (struct output_block *ob)
/* Read sreal value from IB. */
sreal
-sreal::stream_in (struct lto_input_block *ib)
+sreal::stream_in (class lto_input_block *ib)
{
sreal val;
val.m_sig = streamer_read_hwi (ib);
diff --git a/gcc/sreal.h b/gcc/sreal.h
index aa3327b9fe7..cb363d4392a 100644
--- a/gcc/sreal.h
+++ b/gcc/sreal.h
@@ -34,7 +34,7 @@ along with GCC; see the file COPYING3. If not see
#define SREAL_ABS(v) (v < 0 ? -v: v)
struct output_block;
-struct lto_input_block;
+class lto_input_block;
/* Structure for holding a simple real number. */
class sreal
@@ -53,7 +53,7 @@ public:
int64_t to_int () const;
double to_double () const;
void stream_out (struct output_block *);
- static sreal stream_in (struct lto_input_block *);
+ static sreal stream_in (class lto_input_block *);
sreal operator+ (const sreal &other) const;
sreal operator- (const sreal &other) const;
sreal operator* (const sreal &other) const;
diff --git a/gcc/streamer-hooks.h b/gcc/streamer-hooks.h
index 2a45513ca22..14d158e8e9a 100644
--- a/gcc/streamer-hooks.h
+++ b/gcc/streamer-hooks.h
@@ -25,8 +25,8 @@ along with GCC; see the file COPYING3. If not see
/* Forward declarations to avoid including unnecessary headers. */
struct output_block;
-struct lto_input_block;
-struct data_in;
+class lto_input_block;
+class data_in;
/* Streamer hooks. These functions do additional processing as
needed by the module. There are two types of callbacks, those that
@@ -49,10 +49,10 @@ struct streamer_hooks {
to the buffer where to read from and a data_in instance with tables
and descriptors needed by the unpickling routines. It returns the
tree instantiated from the stream. */
- tree (*read_tree) (struct lto_input_block *, struct data_in *);
+ tree (*read_tree) (class lto_input_block *, class data_in *);
/* [REQ] Called by every streaming routine that needs to read a location. */
- void (*input_location) (location_t *, struct bitpack_d *, struct data_in *);
+ void (*input_location) (location_t *, struct bitpack_d *, class data_in *);
/* [REQ] Called by every streaming routine that needs to write a location. */
void (*output_location) (struct output_block *, struct bitpack_d *, location_t);
diff --git a/gcc/target-globals.c b/gcc/target-globals.c
index 94a465c9179..8928fc19fd3 100644
--- a/gcc/target-globals.c
+++ b/gcc/target-globals.c
@@ -42,7 +42,7 @@ along with GCC; see the file COPYING3. If not see
#include "lower-subreg.h"
#if SWITCHABLE_TARGET
-struct target_globals default_target_globals = {
+class target_globals default_target_globals = {
&default_target_flag_state,
&default_target_regs,
&default_target_rtl,
@@ -61,11 +61,11 @@ struct target_globals default_target_globals = {
&default_target_lower_subreg
};
-struct target_globals *
+class target_globals *
save_target_globals (void)
{
- struct target_globals *g = ggc_cleared_alloc <target_globals> ();
- g->flag_state = XCNEW (struct target_flag_state);
+ class target_globals *g = ggc_cleared_alloc <target_globals> ();
+ g->flag_state = XCNEW (class target_flag_state);
g->regs = XCNEW (struct target_regs);
g->rtl = ggc_cleared_alloc<target_rtl> ();
g->recog = XCNEW (struct target_recog);
@@ -76,7 +76,7 @@ save_target_globals (void)
g->libfuncs = ggc_cleared_alloc<target_libfuncs> ();
g->cfgloop = XCNEW (struct target_cfgloop);
g->ira = XCNEW (struct target_ira);
- g->ira_int = XCNEW (struct target_ira_int);
+ g->ira_int = XCNEW (class target_ira_int);
g->builtins = XCNEW (struct target_builtins);
g->gcse = XCNEW (struct target_gcse);
g->bb_reorder = XCNEW (struct target_bb_reorder);
@@ -91,10 +91,10 @@ save_target_globals (void)
correctly when a previous function has changed
*this_target_optabs. */
-struct target_globals *
+class target_globals *
save_target_globals_default_opts ()
{
- struct target_globals *globals;
+ class target_globals *globals;
if (optimization_current_node != optimization_default_node)
{
diff --git a/gcc/target-globals.h b/gcc/target-globals.h
index 57cb42f320e..ceb216a6a9b 100644
--- a/gcc/target-globals.h
+++ b/gcc/target-globals.h
@@ -21,7 +21,7 @@ along with GCC; see the file COPYING3. If not see
#define TARGET_GLOBALS_H 1
#if SWITCHABLE_TARGET
-extern struct target_flag_state *this_target_flag_state;
+extern class target_flag_state *this_target_flag_state;
extern struct target_regs *this_target_regs;
extern struct target_rtl *this_target_rtl;
extern struct target_recog *this_target_recog;
@@ -32,7 +32,7 @@ extern struct target_optabs *this_target_optabs;
extern struct target_libfuncs *this_target_libfuncs;
extern struct target_cfgloop *this_target_cfgloop;
extern struct target_ira *this_target_ira;
-extern struct target_ira_int *this_target_ira_int;
+extern class target_ira_int *this_target_ira_int;
extern struct target_builtins *this_target_builtins;
extern struct target_gcse *this_target_gcse;
extern struct target_bb_reorder *this_target_bb_reorder;
@@ -43,7 +43,7 @@ class GTY(()) target_globals {
public:
~target_globals ();
- struct target_flag_state *GTY((skip)) flag_state;
+ class target_flag_state *GTY((skip)) flag_state;
struct target_regs *GTY((skip)) regs;
struct target_rtl *rtl;
struct target_recog *GTY((skip)) recog;
@@ -54,7 +54,7 @@ public:
struct target_libfuncs *libfuncs;
struct target_cfgloop *GTY((skip)) cfgloop;
struct target_ira *GTY((skip)) ira;
- struct target_ira_int *GTY((skip)) ira_int;
+ class target_ira_int *GTY((skip)) ira_int;
struct target_builtins *GTY((skip)) builtins;
struct target_gcse *GTY((skip)) gcse;
struct target_bb_reorder *GTY((skip)) bb_reorder;
@@ -62,13 +62,13 @@ public:
};
#if SWITCHABLE_TARGET
-extern struct target_globals default_target_globals;
+extern class target_globals default_target_globals;
-extern struct target_globals *save_target_globals (void);
-extern struct target_globals *save_target_globals_default_opts (void);
+extern class target_globals *save_target_globals (void);
+extern class target_globals *save_target_globals_default_opts (void);
static inline void
-restore_target_globals (struct target_globals *g)
+restore_target_globals (class target_globals *g)
{
this_target_flag_state = g->flag_state;
this_target_regs = g->regs;
diff --git a/gcc/target.def b/gcc/target.def
index 41654054ad8..7cc0f37a0d1 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -1970,7 +1970,7 @@ DEFHOOK
"non-NULL, it identifies the loop being vectorized; otherwise a single block "
"is being vectorized.",
void *,
- (struct loop *loop_info),
+ (class loop *loop_info),
default_init_cost)
/* Target function to record N statements of the given kind using the
@@ -1987,7 +1987,7 @@ DEFHOOK
"revised.",
unsigned,
(void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+ class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where),
default_add_stmt_cost)
@@ -2665,7 +2665,7 @@ the loop is to be unrolled. The parameter @var{loop} is a pointer to\n\
the loop, which is going to be checked for unrolling. This target hook\n\
is required only when the target has special constraints like maximum\n\
number of memory accesses.",
- unsigned, (unsigned nunroll, struct loop *loop),
+ unsigned, (unsigned nunroll, class loop *loop),
NULL)
/* True if X is a legitimate MODE-mode immediate operand. */
@@ -3182,7 +3182,7 @@ DEFHOOK
version of this hook assumes the system C library errno location\
is either a declaration of type int or accessed by dereferencing\
a pointer to int.",
- bool, (struct ao_ref *ref),
+ bool, (ao_ref *ref),
default_ref_may_alias_errno)
/* Support for named address spaces. */
@@ -4243,7 +4243,7 @@ for a particular loop. The parameter @var{loop} is a pointer to the loop.\n\
This target hook is required only when the target supports low-overhead\n\
loops, and will help ivopts to make some decisions.\n\
The default version of this hook returns false.",
- bool, (struct loop *loop),
+ bool, (class loop *loop),
default_predict_doloop_p)
DEFHOOK
diff --git a/gcc/target.h b/gcc/target.h
index 008932b5dbd..633e38469a9 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -138,20 +138,20 @@ struct _dep;
struct ddg;
/* This is defined in cfgloop.h . */
-struct loop;
+class loop;
/* This is defined in ifcvt.h. */
struct noce_if_info;
/* This is defined in tree-ssa-alias.h. */
-struct ao_ref;
+class ao_ref;
/* This is defined in tree-vectorizer.h. */
-struct _stmt_vec_info;
+class _stmt_vec_info;
/* These are defined in tree-vect-stmts.c. */
-extern tree stmt_vectype (struct _stmt_vec_info *);
-extern bool stmt_in_inner_loop_p (struct _stmt_vec_info *);
+extern tree stmt_vectype (class _stmt_vec_info *);
+extern bool stmt_in_inner_loop_p (class _stmt_vec_info *);
/* Assembler instructions for creating various kinds of integer object. */
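target.h only refers to these types through pointers, so a forward declaration with the matching key is all it needs; the complete definitions stay in their own headers. A sketch of that arrangement, with stand-in function names:

/* Complete definitions live in other headers; the key here matches them.  */
class loop;
class ao_ref;

/* Pointers to incomplete class types are fine in declarations.  */
extern bool ref_may_alias (class ao_ref *);
extern unsigned unroll_limit (unsigned nunroll, class loop *);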
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 676885bb32e..fa797b445a7 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -651,7 +651,7 @@ default_has_ifunc_p (void)
if the target can take advantage of it. */
bool
-default_predict_doloop_p (struct loop *loop ATTRIBUTE_UNUSED)
+default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
return false;
}
@@ -1366,7 +1366,7 @@ default_empty_mask_is_expensive (unsigned ifn)
array of three unsigned ints, set it to zero, and return its address. */
void *
-default_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
+default_init_cost (class loop *loop_info ATTRIBUTE_UNUSED)
{
unsigned *cost = XNEWVEC (unsigned, 3);
cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
@@ -1379,7 +1379,7 @@ default_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
unsigned
default_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
- struct _stmt_vec_info *stmt_info, int misalign,
+ class _stmt_vec_info *stmt_info, int misalign,
enum vect_cost_model_location where)
{
unsigned *cost = (unsigned *) data;
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index 50b03ce3aa0..ca2e37d093b 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -85,7 +85,7 @@ extern bool default_fixed_point_supported_p (void);
extern bool default_has_ifunc_p (void);
-extern bool default_predict_doloop_p (struct loop *);
+extern bool default_predict_doloop_p (class loop *);
extern const char * default_invalid_within_doloop (const rtx_insn *);
extern tree default_builtin_vectorized_function (unsigned int, tree, tree);
@@ -114,9 +114,9 @@ extern machine_mode default_split_reduction (machine_mode);
extern void default_autovectorize_vector_sizes (vector_sizes *, bool);
extern opt_machine_mode default_get_mask_mode (poly_uint64, poly_uint64);
extern bool default_empty_mask_is_expensive (unsigned);
-extern void *default_init_cost (struct loop *);
+extern void *default_init_cost (class loop *);
extern unsigned default_add_stmt_cost (void *, int, enum vect_cost_for_stmt,
- struct _stmt_vec_info *, int,
+ class _stmt_vec_info *, int,
enum vect_cost_model_location);
extern void default_finish_cost (void *, unsigned *, unsigned *, unsigned *);
extern void default_destroy_cost_data (void *);
@@ -188,7 +188,7 @@ extern bool default_target_option_valid_attribute_p (tree, tree, tree, int);
extern bool default_target_option_pragma_parse (tree, tree);
extern bool default_target_can_inline_p (tree, tree);
extern bool default_valid_pointer_mode (scalar_int_mode);
-extern bool default_ref_may_alias_errno (struct ao_ref *);
+extern bool default_ref_may_alias_errno (class ao_ref *);
extern scalar_int_mode default_addr_space_pointer_mode (addr_space_t);
extern scalar_int_mode default_addr_space_address_mode (addr_space_t);
extern bool default_addr_space_valid_pointer_mode (scalar_int_mode,
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 116be7be395..2567fe2e697 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -159,9 +159,9 @@ HOST_WIDE_INT random_seed;
the support provided depends on the backend. */
rtx stack_limit_rtx;
-struct target_flag_state default_target_flag_state;
+class target_flag_state default_target_flag_state;
#if SWITCHABLE_TARGET
-struct target_flag_state *this_target_flag_state = &default_target_flag_state;
+class target_flag_state *this_target_flag_state = &default_target_flag_state;
#else
#define this_target_flag_state (&default_target_flag_state)
#endif
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index ee7e010a1a5..976cf3c6cf6 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -675,7 +675,7 @@ aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
element exists. If IDX is not NULL, it is set to the index of VAL in
COMB. */
-static struct aff_comb_elt *
+static class aff_comb_elt *
aff_combination_find_elt (aff_tree *comb, tree val, unsigned *idx)
{
unsigned i;
@@ -716,7 +716,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
tree e;
gimple *def;
widest_int scale;
- struct name_expansion *exp;
+ class name_expansion *exp;
aff_combination_zero (&to_add, comb->type);
for (i = 0; i < comb->n; i++)
@@ -795,7 +795,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
default:
continue;
}
- exp = XNEW (struct name_expansion);
+ exp = XNEW (class name_expansion);
exp->in_progress = 1;
if (!*cache)
*cache = new hash_map<tree, name_expansion *>;
@@ -932,7 +932,7 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
for (i = 0; i < div->n; i++)
{
- struct aff_comb_elt *elt
+ class aff_comb_elt *elt
= aff_combination_find_elt (val, div->elts[i].val, NULL);
if (!elt)
return false;
diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h
index 23c6096254b..216482016e2 100644
--- a/gcc/tree-affine.h
+++ b/gcc/tree-affine.h
@@ -56,7 +56,7 @@ public:
The coefficients are always sign extended from the precision of TYPE
(regardless of signedness of TYPE). */
- struct aff_comb_elt elts[MAX_AFF_ELTS];
+ class aff_comb_elt elts[MAX_AFF_ELTS];
/* Remainder of the expression. Usually NULL, used only if there are more
than MAX_AFF_ELTS elements. Type of REST will be either sizetype for
@@ -64,7 +64,7 @@ public:
tree rest;
};
-struct name_expansion;
+class name_expansion;
void aff_combination_const (aff_tree *, tree, const poly_widest_int &);
void aff_combination_elt (aff_tree *, tree, tree);
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 0396aa9bdd3..5c67d024608 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -256,7 +256,7 @@ build_gimple_cfg (gimple_seq seq)
come immediately before the condition in BB, if any. */
static void
-replace_loop_annotate_in_block (basic_block bb, struct loop *loop)
+replace_loop_annotate_in_block (basic_block bb, class loop *loop)
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple *stmt = gsi_stmt (gsi);
@@ -311,7 +311,7 @@ replace_loop_annotate_in_block (basic_block bb, struct loop *loop)
static void
replace_loop_annotate (void)
{
- struct loop *loop;
+ class loop *loop;
basic_block bb;
gimple_stmt_iterator gsi;
gimple *stmt;
@@ -1996,7 +1996,7 @@ replace_uses_by (tree name, tree val)
/* Also update the trees stored in loop structures. */
if (current_loops)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
{
@@ -2223,7 +2223,7 @@ remove_bb (basic_block bb)
if (current_loops)
{
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
/* If a loop gets removed, clean up the information associated
with it. */
@@ -2547,7 +2547,7 @@ dump_cfg_stats (FILE *file)
num_edges = 0;
FOR_EACH_BB_FN (bb, cfun)
num_edges += EDGE_COUNT (bb->succs);
- size = num_edges * sizeof (struct edge_def);
+ size = num_edges * sizeof (class edge_def);
total += size;
fprintf (file, fmt_str_2, "Edges", num_edges, SIZE_AMOUNT (size));
@@ -6383,7 +6383,7 @@ gimple_duplicate_sese_region (edge entry, edge exit,
{
unsigned i;
bool free_region_copy = false, copying_header = false;
- struct loop *loop = entry->dest->loop_father;
+ class loop *loop = entry->dest->loop_father;
edge exit_copy;
vec<basic_block> doms = vNULL;
edge redirected;
@@ -6549,8 +6549,8 @@ gimple_duplicate_sese_tail (edge entry, edge exit,
{
unsigned i;
bool free_region_copy = false;
- struct loop *loop = exit->dest->loop_father;
- struct loop *orig_loop = entry->dest->loop_father;
+ class loop *loop = exit->dest->loop_father;
+ class loop *orig_loop = entry->dest->loop_father;
basic_block switch_bb, entry_bb, nentry_bb;
vec<basic_block> doms;
profile_count total_count = profile_count::uninitialized (),
@@ -6563,7 +6563,7 @@ gimple_duplicate_sese_tail (edge entry, edge exit,
gphi_iterator psi;
gphi *phi;
tree def;
- struct loop *target, *aloop, *cloop;
+ class loop *target, *aloop, *cloop;
gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
exits[0] = exit;
@@ -7040,7 +7040,7 @@ move_block_to_fn (struct function *dest_cfun, basic_block bb,
/* Move BB from its current loop to the copy in the new function. */
if (current_loops)
{
- struct loop *new_loop = (struct loop *)bb->loop_father->aux;
+ class loop *new_loop = (class loop *)bb->loop_father->aux;
if (new_loop)
bb->loop_father = new_loop;
}
@@ -7304,7 +7304,7 @@ replace_block_vars_by_duplicates (tree block, hash_map<tree, tree> *vars_map,
static void
fixup_loop_arrays_after_move (struct function *fn1, struct function *fn2,
- struct loop *loop)
+ class loop *loop)
{
/* Discard it from the old loop array. */
(*get_loops (fn1))[loop->num] = NULL;
@@ -7464,8 +7464,8 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
edge_iterator ei;
htab_t new_label_map;
hash_map<void *, void *> *eh_map;
- struct loop *loop = entry_bb->loop_father;
- struct loop *loop0 = get_loop (saved_cfun, 0);
+ class loop *loop = entry_bb->loop_father;
+ class loop *loop0 = get_loop (saved_cfun, 0);
struct move_stmt_d d;
/* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
@@ -7573,8 +7573,8 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
{
if (bb->loop_father->header == bb)
{
- struct loop *this_loop = bb->loop_father;
- struct loop *outer = loop_outer (this_loop);
+ class loop *this_loop = bb->loop_father;
+ class loop *outer = loop_outer (this_loop);
if (outer == loop
/* If the SESE region contains some bbs ending with
a noreturn call, those are considered to belong
@@ -7614,7 +7614,7 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
/* Fix up orig_loop_num. If the block referenced in it has been moved
to dest_cfun, update orig_loop_num field, otherwise clear it. */
- struct loop *dloop;
+ class loop *dloop;
signed char *moved_orig_loop_num = NULL;
FOR_EACH_LOOP_FN (dest_cfun, dloop, 0)
if (dloop->orig_loop_num)
@@ -7722,14 +7722,14 @@ move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
loop0->aux = NULL;
/* Loop sizes are no longer correct, fix them up. */
loop->num_nodes -= num_nodes;
- for (struct loop *outer = loop_outer (loop);
+ for (class loop *outer = loop_outer (loop);
outer; outer = loop_outer (outer))
outer->num_nodes -= num_nodes;
loop0->num_nodes -= bbs.length () - num_nodes;
if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vectorize_loops)
{
- struct loop *aloop;
+ class loop *aloop;
for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
if (aloop != NULL)
{
@@ -8180,14 +8180,14 @@ print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
}
}
-static void print_loop_and_siblings (FILE *, struct loop *, int, int);
+static void print_loop_and_siblings (FILE *, class loop *, int, int);
/* Pretty print LOOP on FILE, indented INDENT spaces. Following
VERBOSITY level this outputs the contents of the loop, or just its
structure. */
static void
-print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
+print_loop (FILE *file, class loop *loop, int indent, int verbosity)
{
char *s_indent;
basic_block bb;
@@ -8253,7 +8253,7 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
loop, or just its structure. */
static void
-print_loop_and_siblings (FILE *file, struct loop *loop, int indent,
+print_loop_and_siblings (FILE *file, class loop *loop, int indent,
int verbosity)
{
if (loop == NULL)
@@ -8280,13 +8280,13 @@ print_loops (FILE *file, int verbosity)
/* Dump a loop. */
DEBUG_FUNCTION void
-debug (struct loop &ref)
+debug (class loop &ref)
{
print_loop (stderr, &ref, 0, /*verbosity*/0);
}
DEBUG_FUNCTION void
-debug (struct loop *ptr)
+debug (class loop *ptr)
{
if (ptr)
debug (*ptr);
@@ -8297,13 +8297,13 @@ debug (struct loop *ptr)
/* Dump a loop verbosely. */
DEBUG_FUNCTION void
-debug_verbose (struct loop &ref)
+debug_verbose (class loop &ref)
{
print_loop (stderr, &ref, 0, /*verbosity*/3);
}
DEBUG_FUNCTION void
-debug_verbose (struct loop *ptr)
+debug_verbose (class loop *ptr)
{
if (ptr)
debug (*ptr);
@@ -8323,7 +8323,7 @@ debug_loops (int verbosity)
/* Print on stderr the code of LOOP, at some VERBOSITY level. */
DEBUG_FUNCTION void
-debug_loop (struct loop *loop, int verbosity)
+debug_loop (class loop *loop, int verbosity)
{
print_loop (stderr, loop, 0, verbosity);
}
diff --git a/gcc/tree-cfg.h b/gcc/tree-cfg.h
index 836f8e8af51..732916177fa 100644
--- a/gcc/tree-cfg.h
+++ b/gcc/tree-cfg.h
@@ -83,12 +83,12 @@ extern void dump_function_to_file (tree, FILE *, dump_flags_t);
extern void debug_function (tree, dump_flags_t);
extern void print_loops_bb (FILE *, basic_block, int, int);
extern void print_loops (FILE *, int);
-extern void debug (struct loop &ref);
-extern void debug (struct loop *ptr);
-extern void debug_verbose (struct loop &ref);
-extern void debug_verbose (struct loop *ptr);
+extern void debug (class loop &ref);
+extern void debug (class loop *ptr);
+extern void debug_verbose (class loop &ref);
+extern void debug_verbose (class loop *ptr);
extern void debug_loops (int);
-extern void debug_loop (struct loop *, int);
+extern void debug_loop (class loop *, int);
extern void debug_loop_num (unsigned, int);
extern void remove_edge_and_dominated_blocks (edge);
extern bool gimple_purge_dead_eh_edges (basic_block);
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index f50fd2012e1..5ed62260993 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -52,8 +52,8 @@ chrec_fold_plus_poly_poly (enum tree_code code,
tree poly1)
{
tree left, right;
- struct loop *loop0 = get_chrec_loop (poly0);
- struct loop *loop1 = get_chrec_loop (poly1);
+ class loop *loop0 = get_chrec_loop (poly0);
+ class loop *loop1 = get_chrec_loop (poly1);
tree rtype = code == POINTER_PLUS_EXPR ? chrec_type (poly1) : type;
gcc_assert (poly0);
@@ -144,8 +144,8 @@ chrec_fold_multiply_poly_poly (tree type,
{
tree t0, t1, t2;
int var;
- struct loop *loop0 = get_chrec_loop (poly0);
- struct loop *loop1 = get_chrec_loop (poly1);
+ class loop *loop0 = get_chrec_loop (poly0);
+ class loop *loop1 = get_chrec_loop (poly1);
gcc_assert (poly0);
gcc_assert (poly1);
@@ -539,7 +539,7 @@ chrec_evaluate (unsigned var, tree chrec, tree n, unsigned int k)
{
tree arg0, arg1, binomial_n_k;
tree type = TREE_TYPE (chrec);
- struct loop *var_loop = get_loop (cfun, var);
+ class loop *var_loop = get_loop (cfun, var);
while (TREE_CODE (chrec) == POLYNOMIAL_CHREC
&& flow_loop_nested_p (var_loop, get_chrec_loop (chrec)))
@@ -720,7 +720,7 @@ tree
hide_evolution_in_other_loops_than_loop (tree chrec,
unsigned loop_num)
{
- struct loop *loop = get_loop (cfun, loop_num), *chloop;
+ class loop *loop = get_loop (cfun, loop_num), *chloop;
if (automatically_generated_chrec_p (chrec))
return chrec;
@@ -761,7 +761,7 @@ chrec_component_in_loop_num (tree chrec,
bool right)
{
tree component;
- struct loop *loop = get_loop (cfun, loop_num), *chloop;
+ class loop *loop = get_loop (cfun, loop_num), *chloop;
if (automatically_generated_chrec_p (chrec))
return chrec;
@@ -843,7 +843,7 @@ reset_evolution_in_loop (unsigned loop_num,
tree chrec,
tree new_evol)
{
- struct loop *loop = get_loop (cfun, loop_num);
+ class loop *loop = get_loop (cfun, loop_num);
if (POINTER_TYPE_P (chrec_type (chrec)))
gcc_assert (ptrofftype_p (chrec_type (new_evol)));
@@ -939,7 +939,7 @@ is_multivariate_chrec (const_tree chrec)
static bool
chrec_contains_symbols (const_tree chrec, hash_set<const_tree> &visited,
- struct loop *loop)
+ class loop *loop)
{
int i, n;
@@ -977,7 +977,7 @@ chrec_contains_symbols (const_tree chrec, hash_set<const_tree> &visited,
the chrec is considered as a SYMBOL. */
bool
-chrec_contains_symbols (const_tree chrec, struct loop* loop)
+chrec_contains_symbols (const_tree chrec, class loop* loop)
{
hash_set<const_tree> visited;
return chrec_contains_symbols (chrec, visited, loop);
@@ -1296,7 +1296,7 @@ nb_vars_in_chrec (tree chrec)
the conversion succeeded, false otherwise. */
bool
-convert_affine_scev (struct loop *loop, tree type,
+convert_affine_scev (class loop *loop, tree type,
tree *base, tree *step, gimple *at_stmt,
bool use_overflow_semantics, tree from)
{
@@ -1427,7 +1427,7 @@ chrec_convert_1 (tree type, tree chrec, gimple *at_stmt,
{
tree ct, res;
tree base, step;
- struct loop *loop;
+ class loop *loop;
if (automatically_generated_chrec_p (chrec))
return chrec;
@@ -1563,7 +1563,7 @@ chrec_convert_aggressive (tree type, tree chrec, bool *fold_conversions)
if (!*fold_conversions && evolution_function_is_affine_p (chrec))
{
tree base, step;
- struct loop *loop;
+ class loop *loop;
loop = get_chrec_loop (chrec);
base = CHREC_LEFT (chrec);
diff --git a/gcc/tree-chrec.h b/gcc/tree-chrec.h
index 3b5c090ef93..423d8fba9d3 100644
--- a/gcc/tree-chrec.h
+++ b/gcc/tree-chrec.h
@@ -77,13 +77,13 @@ extern tree hide_evolution_in_other_loops_than_loop (tree, unsigned);
extern tree reset_evolution_in_loop (unsigned, tree, tree);
extern tree chrec_merge (tree, tree);
extern void for_each_scev_op (tree *, bool (*) (tree *, void *), void *);
-extern bool convert_affine_scev (struct loop *, tree, tree *, tree *, gimple *,
+extern bool convert_affine_scev (class loop *, tree, tree *, tree *, gimple *,
bool, tree = NULL);
/* Observers. */
extern bool eq_evolutions_p (const_tree, const_tree);
extern bool is_multivariate_chrec (const_tree);
-extern bool chrec_contains_symbols (const_tree, struct loop * = NULL);
+extern bool chrec_contains_symbols (const_tree, class loop * = NULL);
extern bool chrec_contains_symbols_defined_in_loop (const_tree, unsigned);
extern bool chrec_contains_undetermined (const_tree);
extern bool tree_contains_chrecs (const_tree, int *);
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 9ea8638c3c0..b5dde47da4f 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -1908,7 +1908,7 @@ struct GTY(()) tree_optimization_option {
/* Forward declaration, defined in target-globals.h. */
-struct GTY(()) target_globals;
+class GTY(()) target_globals;
/* Target options used by a function. */
@@ -1916,7 +1916,7 @@ struct GTY(()) tree_target_option {
struct tree_base base;
/* Target globals for the corresponding target option. */
- struct target_globals *globals;
+ class target_globals *globals;
/* The optimization options used by the user. */
struct cl_target_option *opts;
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 4dc03efd1de..df1a7b8016e 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -127,7 +127,7 @@ static struct datadep_stats
static bool subscript_dependence_tester_1 (struct data_dependence_relation *,
unsigned int, unsigned int,
- struct loop *);
+ class loop *);
/* Returns true iff A divides B. */
static inline bool
@@ -448,7 +448,7 @@ dump_data_dependence_relation (FILE *outf,
else if (DDR_ARE_DEPENDENT (ddr) == NULL_TREE)
{
unsigned int i;
- struct loop *loopi;
+ class loop *loopi;
subscript *sub;
FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
@@ -873,7 +873,7 @@ canonicalize_base_object_address (tree addr)
opt_result
dr_analyze_innermost (innermost_loop_behavior *drb, tree ref,
- struct loop *loop, const gimple *stmt)
+ class loop *loop, const gimple *stmt)
{
poly_int64 pbitsize, pbitpos;
tree base, poffset;
@@ -1351,7 +1351,7 @@ data_ref_compare_tree (tree t1, tree t2)
check. */
opt_result
-runtime_alias_check_p (ddr_p ddr, struct loop *loop, bool speed_p)
+runtime_alias_check_p (ddr_p ddr, class loop *loop, bool speed_p)
{
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
@@ -1624,7 +1624,7 @@ prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *alias_pairs,
Note evolution step of index needs to be considered in comparison. */
static bool
-create_intersect_range_checks_index (struct loop *loop, tree *cond_expr,
+create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
const dr_with_seg_len& dr_a,
const dr_with_seg_len& dr_b)
{
@@ -1858,7 +1858,7 @@ get_segment_min_max (const dr_with_seg_len &d, tree *seg_min_out,
|| (DR_B_addr_0 + DER_B_segment_length_0) <= DR_A_addr_0)) */
static void
-create_intersect_range_checks (struct loop *loop, tree *cond_expr,
+create_intersect_range_checks (class loop *loop, tree *cond_expr,
const dr_with_seg_len& dr_a,
const dr_with_seg_len& dr_b)
{
@@ -1917,7 +1917,7 @@ create_intersect_range_checks (struct loop *loop, tree *cond_expr,
that controls which version of the loop gets executed at runtime. */
void
-create_runtime_alias_checks (struct loop *loop,
+create_runtime_alias_checks (class loop *loop,
vec<dr_with_seg_len_pair_t> *alias_pairs,
tree * cond_expr)
{
@@ -2197,7 +2197,7 @@ conflict_fn_no_dependence (void)
/* Returns true if the address of OBJ is invariant in LOOP. */
static bool
-object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
+object_address_invariant_in_loop_p (const class loop *loop, const_tree obj)
{
while (handled_component_p (obj))
{
@@ -2231,7 +2231,7 @@ object_address_invariant_in_loop_p (const struct loop *loop, const_tree obj)
bool
dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
tree addr_a = DR_BASE_OBJECT (a);
tree addr_b = DR_BASE_OBJECT (b);
@@ -2892,7 +2892,7 @@ analyze_ziv_subscript (tree chrec_a,
chrec_dont_know. */
static tree
-max_stmt_executions_tree (struct loop *loop)
+max_stmt_executions_tree (class loop *loop)
{
widest_int nit;
@@ -3046,7 +3046,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
{
HOST_WIDE_INT numiter;
- struct loop *loop = get_chrec_loop (chrec_b);
+ class loop *loop = get_chrec_loop (chrec_b);
*overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
tmp = fold_build2 (EXACT_DIV_EXPR, type,
@@ -3127,7 +3127,7 @@ analyze_siv_subscript_cst_affine (tree chrec_a,
if (tree_fold_divides_p (CHREC_RIGHT (chrec_b), difference))
{
HOST_WIDE_INT numiter;
- struct loop *loop = get_chrec_loop (chrec_b);
+ class loop *loop = get_chrec_loop (chrec_b);
*overlaps_a = conflict_fn (1, affine_fn_cst (integer_zero_node));
tmp = fold_build2 (EXACT_DIV_EXPR, type, difference,
@@ -4023,7 +4023,7 @@ analyze_miv_subscript (tree chrec_a,
conflict_function **overlaps_a,
conflict_function **overlaps_b,
tree *last_conflicts,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
tree type, difference;
@@ -4125,7 +4125,7 @@ analyze_overlapping_iterations (tree chrec_a,
tree chrec_b,
conflict_function **overlap_iterations_a,
conflict_function **overlap_iterations_b,
- tree *last_conflicts, struct loop *loop_nest)
+ tree *last_conflicts, class loop *loop_nest)
{
unsigned int lnn = loop_nest->num;
@@ -4275,7 +4275,7 @@ build_classic_dist_vector_1 (struct data_dependence_relation *ddr,
{
unsigned i;
lambda_vector init_v = lambda_vector_new (DDR_NB_LOOPS (ddr));
- struct loop *loop = DDR_LOOP_NEST (ddr)[0];
+ class loop *loop = DDR_LOOP_NEST (ddr)[0];
for (i = 0; i < DDR_NUM_SUBSCRIPTS (ddr); i++)
{
@@ -4426,7 +4426,7 @@ add_other_self_distances (struct data_dependence_relation *ddr)
unsigned i;
int index_carry = DDR_NB_LOOPS (ddr);
subscript *sub;
- struct loop *loop = DDR_LOOP_NEST (ddr)[0];
+ class loop *loop = DDR_LOOP_NEST (ddr)[0];
FOR_EACH_VEC_ELT (DDR_SUBSCRIPTS (ddr), i, sub)
{
@@ -4546,7 +4546,7 @@ same_access_functions (const struct data_dependence_relation *ddr)
static bool
build_classic_dist_vector (struct data_dependence_relation *ddr,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
bool init_b = false;
int index_carry = DDR_NB_LOOPS (ddr);
@@ -4733,7 +4733,7 @@ build_classic_dir_vector (struct data_dependence_relation *ddr)
static bool
subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
unsigned int a_index, unsigned int b_index,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
unsigned int i;
tree last_conflicts;
@@ -4792,7 +4792,7 @@ subscript_dependence_tester_1 (struct data_dependence_relation *ddr,
static void
subscript_dependence_tester (struct data_dependence_relation *ddr,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
if (subscript_dependence_tester_1 (ddr, 0, 1, loop_nest))
dependence_stats.num_dependence_dependent++;
@@ -4807,7 +4807,7 @@ subscript_dependence_tester (struct data_dependence_relation *ddr,
static bool
access_functions_are_affine_or_constant_p (const struct data_reference *a,
- const struct loop *loop_nest)
+ const class loop *loop_nest)
{
unsigned int i;
vec<tree> fns = DR_ACCESS_FNS (a);
@@ -4832,7 +4832,7 @@ access_functions_are_affine_or_constant_p (const struct data_reference *a,
void
compute_affine_dependence (struct data_dependence_relation *ddr,
- struct loop *loop_nest)
+ class loop *loop_nest)
{
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
@@ -4975,7 +4975,7 @@ get_references_in_stmt (gimple *stmt, vec<data_ref_loc, va_heap> *references)
{
case IFN_GOMP_SIMD_LANE:
{
- struct loop *loop = gimple_bb (stmt)->loop_father;
+ class loop *loop = gimple_bb (stmt)->loop_father;
tree uid = gimple_call_arg (stmt, 0);
gcc_assert (TREE_CODE (uid) == SSA_NAME);
if (loop == NULL
@@ -5117,7 +5117,7 @@ loop_nest_has_data_refs (loop_p loop)
loop of the loop nest in which the references should be analyzed. */
opt_result
-find_data_references_in_stmt (struct loop *nest, gimple *stmt,
+find_data_references_in_stmt (class loop *nest, gimple *stmt,
vec<data_reference_p> *datarefs)
{
unsigned i;
@@ -5176,7 +5176,7 @@ graphite_find_data_references_in_stmt (edge nest, loop_p loop, gimple *stmt,
difficult case, returns NULL_TREE otherwise. */
tree
-find_data_references_in_bb (struct loop *loop, basic_block bb,
+find_data_references_in_bb (class loop *loop, basic_block bb,
vec<data_reference_p> *datarefs)
{
gimple_stmt_iterator bsi;
@@ -5206,7 +5206,7 @@ find_data_references_in_bb (struct loop *loop, basic_block bb,
arithmetic as if they were array accesses, etc. */
tree
-find_data_references_in_loop (struct loop *loop,
+find_data_references_in_loop (class loop *loop,
vec<data_reference_p> *datarefs)
{
basic_block bb, *bbs;
@@ -5331,7 +5331,7 @@ get_base_for_alignment (tree addr, unsigned int *max_alignment)
/* Recursive helper function. */
static bool
-find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
+find_loop_nest_1 (class loop *loop, vec<loop_p> *loop_nest)
{
/* Inner loops of the nest should not contain siblings. Example:
when there are two consecutive loops,
@@ -5362,7 +5362,7 @@ find_loop_nest_1 (struct loop *loop, vec<loop_p> *loop_nest)
appear in the classic distance vector. */
bool
-find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
+find_loop_nest (class loop *loop, vec<loop_p> *loop_nest)
{
loop_nest->safe_push (loop);
if (loop->inner)
@@ -5378,7 +5378,7 @@ find_loop_nest (struct loop *loop, vec<loop_p> *loop_nest)
COMPUTE_SELF_AND_READ_READ_DEPENDENCES is TRUE. */
bool
-compute_data_dependences_for_loop (struct loop *loop,
+compute_data_dependences_for_loop (class loop *loop,
bool compute_self_and_read_read_dependences,
vec<loop_p> *loop_nest,
vec<data_reference_p> *datarefs,
diff --git a/gcc/tree-data-ref.h b/gcc/tree-data-ref.h
index d7cd98d589d..998937fef68 100644
--- a/gcc/tree-data-ref.h
+++ b/gcc/tree-data-ref.h
@@ -421,8 +421,8 @@ typedef struct data_dependence_relation *ddr_p;
opt_result dr_analyze_innermost (innermost_loop_behavior *, tree,
- struct loop *, const gimple *);
-extern bool compute_data_dependences_for_loop (struct loop *, bool,
+ class loop *, const gimple *);
+extern bool compute_data_dependences_for_loop (class loop *, bool,
vec<loop_p> *,
vec<data_reference_p> *,
vec<ddr_p> *);
@@ -443,15 +443,15 @@ extern void free_dependence_relation (struct data_dependence_relation *);
extern void free_dependence_relations (vec<ddr_p> );
extern void free_data_ref (data_reference_p);
extern void free_data_refs (vec<data_reference_p> );
-extern opt_result find_data_references_in_stmt (struct loop *, gimple *,
+extern opt_result find_data_references_in_stmt (class loop *, gimple *,
vec<data_reference_p> *);
extern bool graphite_find_data_references_in_stmt (edge, loop_p, gimple *,
vec<data_reference_p> *);
-tree find_data_references_in_loop (struct loop *, vec<data_reference_p> *);
+tree find_data_references_in_loop (class loop *, vec<data_reference_p> *);
bool loop_nest_has_data_refs (loop_p loop);
struct data_reference *create_data_ref (edge, loop_p, tree, gimple *, bool,
bool);
-extern bool find_loop_nest (struct loop *, vec<loop_p> *);
+extern bool find_loop_nest (class loop *, vec<loop_p> *);
extern struct data_dependence_relation *initialize_data_dependence_relation
(struct data_reference *, struct data_reference *, vec<loop_p>);
extern void compute_affine_dependence (struct data_dependence_relation *,
@@ -460,7 +460,7 @@ extern void compute_self_dependence (struct data_dependence_relation *);
extern bool compute_all_dependences (vec<data_reference_p> ,
vec<ddr_p> *,
vec<loop_p>, bool);
-extern tree find_data_references_in_bb (struct loop *, basic_block,
+extern tree find_data_references_in_bb (class loop *, basic_block,
vec<data_reference_p> *);
extern unsigned int dr_alignment (innermost_loop_behavior *);
extern tree get_base_for_alignment (tree, unsigned int *);
@@ -475,15 +475,15 @@ dr_alignment (data_reference *dr)
}
extern bool dr_may_alias_p (const struct data_reference *,
- const struct data_reference *, struct loop *);
+ const struct data_reference *, class loop *);
extern bool dr_equal_offsets_p (struct data_reference *,
struct data_reference *);
-extern opt_result runtime_alias_check_p (ddr_p, struct loop *, bool);
+extern opt_result runtime_alias_check_p (ddr_p, class loop *, bool);
extern int data_ref_compare_tree (tree, tree);
extern void prune_runtime_alias_test_list (vec<dr_with_seg_len_pair_t> *,
poly_uint64);
-extern void create_runtime_alias_checks (struct loop *,
+extern void create_runtime_alias_checks (class loop *,
vec<dr_with_seg_len_pair_t> *, tree*);
extern tree dr_direction_indicator (struct data_reference *);
extern tree dr_zero_step_indicator (struct data_reference *);
@@ -574,7 +574,7 @@ ddr_dependence_level (ddr_p ddr)
static inline int
index_in_loop_nest (int var, vec<loop_p> loop_nest)
{
- struct loop *loopi;
+ class loop *loopi;
int var_index;
for (var_index = 0; loop_nest.iterate (var_index, &loopi); var_index++)
diff --git a/gcc/tree-if-conv.c b/gcc/tree-if-conv.c
index 98566e3fd19..d9e540f1d5a 100644
--- a/gcc/tree-if-conv.c
+++ b/gcc/tree-if-conv.c
@@ -502,7 +502,7 @@ fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
cd-equivalent if they are executed under the same condition. */
static inline void
-add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
+add_to_predicate_list (class loop *loop, basic_block bb, tree nc)
{
tree bc, *tp;
basic_block dom_bb;
@@ -567,7 +567,7 @@ add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
the loop to be if-converted. */
static void
-add_to_dst_predicate_list (struct loop *loop, edge e,
+add_to_dst_predicate_list (class loop *loop, edge e,
tree prev_cond, tree cond)
{
if (!flow_bb_inside_loop_p (loop, e->dest))
@@ -584,7 +584,7 @@ add_to_dst_predicate_list (struct loop *loop, edge e,
/* Return true if one of the successor edges of BB exits LOOP. */
static bool
-bb_with_exit_edge_p (struct loop *loop, basic_block bb)
+bb_with_exit_edge_p (class loop *loop, basic_block bb)
{
edge e;
edge_iterator ei;
@@ -661,7 +661,7 @@ phi_convertible_by_degenerating_args (gphi *phi)
ANY_COMPLICATED_PHI if PHI is complicated. */
static bool
-if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
+if_convertible_phi_p (class loop *loop, basic_block bb, gphi *phi)
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -756,7 +756,7 @@ idx_within_array_bound (tree ref, tree *idx, void *dta)
widest_int niter, valid_niter, delta, wi_step;
tree ev, init, step;
tree low, high;
- struct loop *loop = (struct loop*) dta;
+ class loop *loop = (class loop*) dta;
/* Only support within-bound access for array references. */
if (TREE_CODE (ref) != ARRAY_REF)
@@ -822,7 +822,7 @@ idx_within_array_bound (tree ref, tree *idx, void *dta)
static bool
ref_within_array_bound (gimple *stmt, tree ref)
{
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
gcc_assert (loop != NULL);
return for_each_index (&ref, idx_within_array_bound, loop);
@@ -1128,7 +1128,7 @@ all_preds_critical_p (basic_block bb)
inside LOOP. */
static bool
-if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
+if_convertible_bb_p (class loop *loop, basic_block bb, basic_block exit_bb)
{
edge e;
edge_iterator ei;
@@ -1197,7 +1197,7 @@ pred_blocks_visited_p (basic_block bb, bitmap *visited)
predecessors are already selected. */
static basic_block *
-get_loop_body_in_if_conv_order (const struct loop *loop)
+get_loop_body_in_if_conv_order (const class loop *loop)
{
basic_block *blocks, *blocks_in_bfs_order;
basic_block bb;
@@ -1344,7 +1344,7 @@ predicate_bbs (loop_p loop)
/* Build region by adding loop pre-header and post-header blocks. */
static vec<basic_block>
-build_region (struct loop *loop)
+build_region (class loop *loop)
{
vec<basic_block> region = vNULL;
basic_block exit_bb = NULL;
@@ -1378,7 +1378,7 @@ build_region (struct loop *loop)
in if_convertible_loop_p. */
static bool
-if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
+if_convertible_loop_p_1 (class loop *loop, vec<data_reference_p> *refs)
{
unsigned int i;
basic_block exit_bb = NULL;
@@ -1518,7 +1518,7 @@ if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
- if its basic blocks and phi nodes are if convertible. */
static bool
-if_convertible_loop_p (struct loop *loop)
+if_convertible_loop_p (class loop *loop)
{
edge e;
edge_iterator ei;
@@ -1597,7 +1597,7 @@ is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
gimple *header_phi = NULL;
enum tree_code reduction_op;
basic_block bb = gimple_bb (phi);
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
edge latch_e = loop_latch_edge (loop);
imm_use_iterator imm_iter;
use_operand_p use_p;
@@ -2004,7 +2004,7 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
LOOP->header block with conditional modify expressions. */
static void
-predicate_all_scalar_phis (struct loop *loop)
+predicate_all_scalar_phis (class loop *loop)
{
basic_block bb;
unsigned int orig_loop_num_nodes = loop->num_nodes;
@@ -2526,7 +2526,7 @@ remove_conditions_and_labels (loop_p loop)
blocks. Replace PHI nodes with conditional modify expressions. */
static void
-combine_blocks (struct loop *loop)
+combine_blocks (class loop *loop)
{
basic_block bb, exit_bb, merge_target_bb;
unsigned int orig_loop_num_nodes = loop->num_nodes;
@@ -2719,12 +2719,12 @@ combine_blocks (struct loop *loop)
out of LOOP_VECTORIZED must have 100% probability so the profile remains
consistent after the condition is folded in the vectorizer. */
-static struct loop *
-version_loop_for_if_conversion (struct loop *loop, vec<gimple *> *preds)
+static class loop *
+version_loop_for_if_conversion (class loop *loop, vec<gimple *> *preds)
{
basic_block cond_bb;
tree cond = make_ssa_name (boolean_type_node);
- struct loop *new_loop;
+ class loop *new_loop;
gimple *g;
gimple_stmt_iterator gsi;
unsigned int save_length;
@@ -2781,7 +2781,7 @@ version_loop_for_if_conversion (struct loop *loop, vec<gimple *> *preds)
inner loop's exit block. */
static bool
-versionable_outer_loop_p (struct loop *loop)
+versionable_outer_loop_p (class loop *loop)
{
if (!loop_outer (loop)
|| loop->dont_vectorize
@@ -2815,7 +2815,7 @@ versionable_outer_loop_p (struct loop *loop)
Last restriction is valid only if AGGRESSIVE_IF_CONV is false. */
static bool
-ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
+ifcvt_split_critical_edges (class loop *loop, bool aggressive_if_conv)
{
basic_block *body;
basic_block bb;
@@ -2982,11 +2982,11 @@ ifcvt_local_dce (basic_block bb)
changed. */
unsigned int
-tree_if_conversion (struct loop *loop, vec<gimple *> *preds)
+tree_if_conversion (class loop *loop, vec<gimple *> *preds)
{
unsigned int todo = 0;
bool aggressive_if_conv;
- struct loop *rloop;
+ class loop *rloop;
bitmap exit_bbs;
again:
@@ -3001,7 +3001,7 @@ tree_if_conversion (struct loop *loop, vec<gimple *> *preds)
aggressive_if_conv = loop->force_vectorize;
if (!aggressive_if_conv)
{
- struct loop *outer_loop = loop_outer (loop);
+ class loop *outer_loop = loop_outer (loop);
if (outer_loop && outer_loop->force_vectorize)
aggressive_if_conv = true;
}
@@ -3027,10 +3027,10 @@ tree_if_conversion (struct loop *loop, vec<gimple *> *preds)
|| any_complicated_phi
|| flag_tree_loop_if_convert != 1)
{
- struct loop *vloop
+ class loop *vloop
= (versionable_outer_loop_p (loop_outer (loop))
? loop_outer (loop) : loop);
- struct loop *nloop = version_loop_for_if_conversion (vloop, preds);
+ class loop *nloop = version_loop_for_if_conversion (vloop, preds);
if (nloop == NULL)
goto cleanup;
if (vloop != loop)
@@ -3138,7 +3138,7 @@ pass_if_conversion::gate (function *fun)
unsigned int
pass_if_conversion::execute (function *fun)
{
- struct loop *loop;
+ class loop *loop;
unsigned todo = 0;
if (number_of_loops (fun) <= 1)
diff --git a/gcc/tree-if-conv.h b/gcc/tree-if-conv.h
index c136ebb17ff..a83380b8a69 100644
--- a/gcc/tree-if-conv.h
+++ b/gcc/tree-if-conv.h
@@ -19,6 +19,6 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_IF_CONV_H
#define GCC_TREE_IF_CONV_H
-unsigned int tree_if_conversion (struct loop *, vec<gimple *> * = NULL);
+unsigned int tree_if_conversion (class loop *, vec<gimple *> * = NULL);
#endif /* GCC_TREE_IF_CONV_H */
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 42e45978ee5..4311309acce 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -2804,15 +2804,15 @@ maybe_move_debug_stmts_to_successors (copy_body_data *id, basic_block new_bb)
static void
copy_loops (copy_body_data *id,
- struct loop *dest_parent, struct loop *src_parent)
+ class loop *dest_parent, class loop *src_parent)
{
- struct loop *src_loop = src_parent->inner;
+ class loop *src_loop = src_parent->inner;
while (src_loop)
{
if (!id->blocks_to_copy
|| bitmap_bit_p (id->blocks_to_copy, src_loop->header->index))
{
- struct loop *dest_loop = alloc_loop ();
+ class loop *dest_loop = alloc_loop ();
/* Assign the new loop its header and latch and associate
those with the new loop. */
diff --git a/gcc/tree-loop-distribution.c b/gcc/tree-loop-distribution.c
index 3881aaff9e5..81784866ad1 100644
--- a/gcc/tree-loop-distribution.c
+++ b/gcc/tree-loop-distribution.c
@@ -506,7 +506,7 @@ bb_top_order_cmp (const void *x, const void *y)
statements in loop copies. */
static void
-stmts_from_loop (struct loop *loop, vec<gimple *> *stmts)
+stmts_from_loop (class loop *loop, vec<gimple *> *stmts)
{
unsigned int i;
basic_block *bbs = get_loop_body_in_custom_order (loop, bb_top_order_cmp);
@@ -564,7 +564,7 @@ free_rdg (struct graph *rdg)
collected and recorded in global data DATAREFS_VEC. */
static struct graph *
-build_rdg (struct loop *loop, control_dependences *cd)
+build_rdg (class loop *loop, control_dependences *cd)
{
struct graph *rdg;
@@ -787,10 +787,10 @@ stmt_has_scalar_dependences_outside_loop (loop_p loop, gimple *stmt)
/* Return a copy of LOOP placed before LOOP. */
-static struct loop *
-copy_loop_before (struct loop *loop)
+static class loop *
+copy_loop_before (class loop *loop)
{
- struct loop *res;
+ class loop *res;
edge preheader = loop_preheader_edge (loop);
initialize_original_copy_tables ();
@@ -805,7 +805,7 @@ copy_loop_before (struct loop *loop)
/* Creates an empty basic block after LOOP. */
static void
-create_bb_after_loop (struct loop *loop)
+create_bb_after_loop (class loop *loop)
{
edge exit = single_exit (loop);
@@ -822,7 +822,7 @@ create_bb_after_loop (struct loop *loop)
basic blocks of a loop are taken in dom order. */
static void
-generate_loops_for_partition (struct loop *loop, partition *partition,
+generate_loops_for_partition (class loop *loop, partition *partition,
bool copy_p)
{
unsigned i;
@@ -994,7 +994,7 @@ const_with_all_bytes_same (tree val)
/* Generate a call to memset for PARTITION in LOOP. */
static void
-generate_memset_builtin (struct loop *loop, partition *partition)
+generate_memset_builtin (class loop *loop, partition *partition)
{
gimple_stmt_iterator gsi;
tree mem, fn, nb_bytes;
@@ -1048,7 +1048,7 @@ generate_memset_builtin (struct loop *loop, partition *partition)
/* Generate a call to memcpy for PARTITION in LOOP. */
static void
-generate_memcpy_builtin (struct loop *loop, partition *partition)
+generate_memcpy_builtin (class loop *loop, partition *partition)
{
gimple_stmt_iterator gsi;
gimple *fn_call;
@@ -1092,7 +1092,7 @@ generate_memcpy_builtin (struct loop *loop, partition *partition)
/* Remove and destroy the loop LOOP. */
static void
-destroy_loop (struct loop *loop)
+destroy_loop (class loop *loop)
{
unsigned nbbs = loop->num_nodes;
edge exit = single_exit (loop);
@@ -1169,7 +1169,7 @@ destroy_loop (struct loop *loop)
/* Generates code for PARTITION. Return whether LOOP needs to be destroyed. */
static bool
-generate_code_for_partition (struct loop *loop,
+generate_code_for_partition (class loop *loop,
partition *partition, bool copy_p)
{
switch (partition->kind)
@@ -1346,7 +1346,7 @@ build_rdg_partition_for_vertex (struct graph *rdg, int v)
data references. */
static bool
-find_single_drs (struct loop *loop, struct graph *rdg, partition *partition,
+find_single_drs (class loop *loop, struct graph *rdg, partition *partition,
data_reference_p *dst_dr, data_reference_p *src_dr)
{
unsigned i;
@@ -1469,7 +1469,7 @@ compute_access_range (loop_p loop_nest, data_reference_p dr, tree *base,
{
location_t loc = gimple_location (DR_STMT (dr));
basic_block bb = gimple_bb (DR_STMT (dr));
- struct loop *loop = bb->loop_father;
+ class loop *loop = bb->loop_father;
tree ref = DR_REF (dr);
tree access_base = build_fold_addr_expr (ref);
tree access_size = TYPE_SIZE_UNIT (TREE_TYPE (ref));
@@ -2426,7 +2426,7 @@ data_ref_segment_size (struct data_reference *dr, tree niters)
DR. */
static inline bool
-latch_dominated_by_data_ref (struct loop *loop, data_reference *dr)
+latch_dominated_by_data_ref (class loop *loop, data_reference *dr)
{
return dominated_by_p (CDI_DOMINATORS, single_exit (loop)->src,
gimple_bb (DR_STMT (dr)));
@@ -2436,7 +2436,7 @@ latch_dominated_by_data_ref (struct loop *loop, data_reference *dr)
data dependence relations ALIAS_DDRS. */
static void
-compute_alias_check_pairs (struct loop *loop, vec<ddr_p> *alias_ddrs,
+compute_alias_check_pairs (class loop *loop, vec<ddr_p> *alias_ddrs,
vec<dr_with_seg_len_pair_t> *comp_alias_pairs)
{
unsigned int i;
@@ -2508,11 +2508,11 @@ compute_alias_check_pairs (struct loop *loop, vec<ddr_p> *alias_ddrs,
static void
version_loop_by_alias_check (vec<struct partition *> *partitions,
- struct loop *loop, vec<ddr_p> *alias_ddrs)
+ class loop *loop, vec<ddr_p> *alias_ddrs)
{
profile_probability prob;
basic_block cond_bb;
- struct loop *nloop;
+ class loop *nloop;
tree lhs, arg0, cond_expr = NULL_TREE;
gimple_seq cond_stmts = NULL;
gimple *call_stmt = NULL;
@@ -2723,7 +2723,7 @@ fuse_memset_builtins (vec<struct partition *> *partitions)
ALIAS_DDRS contains ddrs which need runtime alias check. */
static void
-finalize_partitions (struct loop *loop, vec<struct partition *> *partitions,
+finalize_partitions (class loop *loop, vec<struct partition *> *partitions,
vec<ddr_p> *alias_ddrs)
{
unsigned i;
@@ -2780,7 +2780,7 @@ finalize_partitions (struct loop *loop, vec<struct partition *> *partitions,
Set *DESTROY_P to whether LOOP needs to be destroyed. */
static int
-distribute_loop (struct loop *loop, vec<gimple *> stmts,
+distribute_loop (class loop *loop, vec<gimple *> stmts,
control_dependences *cd, int *nb_calls, bool *destroy_p,
bool only_patterns_p)
{
@@ -3060,7 +3060,7 @@ public:
WORK_LIST. Return false if there is nothing for distribution. */
static bool
-find_seed_stmts_for_distribution (struct loop *loop, vec<gimple *> *work_list)
+find_seed_stmts_for_distribution (class loop *loop, vec<gimple *> *work_list)
{
basic_block *bbs = get_loop_body_in_dom_order (loop);
@@ -3114,10 +3114,10 @@ find_seed_stmts_for_distribution (struct loop *loop, vec<gimple *> *work_list)
/* Given innermost LOOP, return the outermost enclosing loop that forms a
perfect loop nest. */
-static struct loop *
-prepare_perfect_loop_nest (struct loop *loop)
+static class loop *
+prepare_perfect_loop_nest (class loop *loop)
{
- struct loop *outer = loop_outer (loop);
+ class loop *outer = loop_outer (loop);
tree niters = number_of_latch_executions (loop);
/* TODO: We only support the innermost 3-level loop nest distribution
@@ -3143,7 +3143,7 @@ prepare_perfect_loop_nest (struct loop *loop)
unsigned int
pass_loop_distribution::execute (function *fun)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
basic_block bb;
control_dependences *cd = NULL;
diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c
index 6b8c8cd5b75..f5cb411f087 100644
--- a/gcc/tree-parloops.c
+++ b/gcc/tree-parloops.c
@@ -412,7 +412,7 @@ lambda_transform_legal_p (lambda_trans_matrix trans,
in parallel). */
static bool
-loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
+loop_parallel_p (class loop *loop, struct obstack * parloop_obstack)
{
vec<ddr_p> dependence_relations;
vec<data_reference_p> datarefs;
@@ -468,7 +468,7 @@ loop_parallel_p (struct loop *loop, struct obstack * parloop_obstack)
BB_IRREDUCIBLE_LOOP flag. */
static inline bool
-loop_has_blocks_with_irreducible_flag (struct loop *loop)
+loop_has_blocks_with_irreducible_flag (class loop *loop)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
@@ -572,7 +572,7 @@ reduc_stmt_res (gimple *stmt)
the loop described in DATA. */
int
-initialize_reductions (reduction_info **slot, struct loop *loop)
+initialize_reductions (reduction_info **slot, class loop *loop)
{
tree init;
tree type, arg;
@@ -1034,7 +1034,7 @@ add_field_for_name (name_to_copy_elt **slot, tree type)
reduction's data structure. */
int
-create_phi_for_local_result (reduction_info **slot, struct loop *loop)
+create_phi_for_local_result (reduction_info **slot, class loop *loop)
{
struct reduction_info *const reduc = *slot;
edge e;
@@ -1158,11 +1158,11 @@ create_call_for_reduction_1 (reduction_info **slot, struct clsn_data *clsn_data)
LD_ST_DATA describes the shared data structure where
shared data is stored in and loaded from. */
static void
-create_call_for_reduction (struct loop *loop,
+create_call_for_reduction (class loop *loop,
reduction_info_table_type *reduction_list,
struct clsn_data *ld_st_data)
{
- reduction_list->traverse <struct loop *, create_phi_for_local_result> (loop);
+ reduction_list->traverse <class loop *, create_phi_for_local_result> (loop);
/* Find the fallthru edge from GIMPLE_OMP_CONTINUE. */
basic_block continue_bb = single_pred (loop->latch);
ld_st_data->load_bb = FALLTHRU_EDGE (continue_bb)->dest;
@@ -1640,7 +1640,7 @@ replace_uses_in_bb_by (tree name, tree val, basic_block bb)
bound. */
static void
-transform_to_exit_first_loop_alt (struct loop *loop,
+transform_to_exit_first_loop_alt (class loop *loop,
reduction_info_table_type *reduction_list,
tree bound)
{
@@ -1797,7 +1797,7 @@ transform_to_exit_first_loop_alt (struct loop *loop,
transformation is successful. */
static bool
-try_transform_to_exit_first_loop_alt (struct loop *loop,
+try_transform_to_exit_first_loop_alt (class loop *loop,
reduction_info_table_type *reduction_list,
tree nit)
{
@@ -1916,7 +1916,7 @@ try_transform_to_exit_first_loop_alt (struct loop *loop,
LOOP. */
static void
-transform_to_exit_first_loop (struct loop *loop,
+transform_to_exit_first_loop (class loop *loop,
reduction_info_table_type *reduction_list,
tree nit)
{
@@ -2030,7 +2030,7 @@ transform_to_exit_first_loop (struct loop *loop,
that number is to be determined later. */
static void
-create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
+create_parallel_loop (class loop *loop, tree loop_fn, tree data,
tree new_data, unsigned n_threads, location_t loc,
bool oacc_kernels_p)
{
@@ -2266,9 +2266,9 @@ num_phis (basic_block bb, bool count_virtual_p)
REDUCTION_LIST describes the reductions existent in the LOOP. */
static void
-gen_parallel_loop (struct loop *loop,
+gen_parallel_loop (class loop *loop,
reduction_info_table_type *reduction_list,
- unsigned n_threads, struct tree_niter_desc *niter,
+ unsigned n_threads, class tree_niter_desc *niter,
bool oacc_kernels_p)
{
tree many_iterations_cond, type, nit;
@@ -2441,7 +2441,7 @@ gen_parallel_loop (struct loop *loop,
/* Generate initializations for reductions. */
if (!reduction_list->is_empty ())
- reduction_list->traverse <struct loop *, initialize_reductions> (loop);
+ reduction_list->traverse <class loop *, initialize_reductions> (loop);
/* Eliminate the references to local variables from the loop. */
gcc_assert (single_exit (loop));
@@ -2489,7 +2489,7 @@ gen_parallel_loop (struct loop *loop,
/* Returns true when LOOP contains vector phi nodes. */
static bool
-loop_has_vector_phi_nodes (struct loop *loop ATTRIBUTE_UNUSED)
+loop_has_vector_phi_nodes (class loop *loop ATTRIBUTE_UNUSED)
{
unsigned i;
basic_block *bbs = get_loop_body_in_dom_order (loop);
@@ -2695,7 +2695,7 @@ gather_scalar_reductions (loop_p loop, reduction_info_table_type *reduction_list
/* Try to initialize NITER for code generation part. */
static bool
-try_get_loop_niter (loop_p loop, struct tree_niter_desc *niter)
+try_get_loop_niter (loop_p loop, class tree_niter_desc *niter)
{
edge exit = single_dom_exit (loop);
@@ -2737,7 +2737,7 @@ get_omp_data_i_param (void)
and return addr. Otherwise, return NULL_TREE. */
static tree
-find_reduc_addr (struct loop *loop, gphi *phi)
+find_reduc_addr (class loop *loop, gphi *phi)
{
edge e = loop_preheader_edge (loop);
tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e);
@@ -2907,7 +2907,7 @@ try_create_reduction_list (loop_p loop,
/* Return true if LOOP contains phis with ADDR_EXPR in args. */
static bool
-loop_has_phi_with_address_arg (struct loop *loop)
+loop_has_phi_with_address_arg (class loop *loop)
{
basic_block *bbs = get_loop_body (loop);
bool res = false;
@@ -3244,7 +3244,7 @@ oacc_entry_exit_single_gang (bitmap in_loop_bbs, vec<basic_block> region_bbs,
outside LOOP by guarding them such that only a single gang executes them. */
static bool
-oacc_entry_exit_ok (struct loop *loop,
+oacc_entry_exit_ok (class loop *loop,
reduction_info_table_type *reduction_list)
{
basic_block *loop_bbs = get_loop_body_in_dom_order (loop);
@@ -3289,9 +3289,9 @@ parallelize_loops (bool oacc_kernels_p)
{
unsigned n_threads;
bool changed = false;
- struct loop *loop;
- struct loop *skip_loop = NULL;
- struct tree_niter_desc niter_desc;
+ class loop *loop;
+ class loop *skip_loop = NULL;
+ class tree_niter_desc niter_desc;
struct obstack parloop_obstack;
HOST_WIDE_INT estimated;
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 7b832938503..1c8df3d0a71 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -132,7 +132,7 @@ protected:
}
};
-class varpool_node;
+struct varpool_node;
struct cgraph_node;
struct lto_symtab_encoder_d;
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index dfb21460d58..299c45e287b 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -735,7 +735,7 @@ determine_offset (struct data_reference *a, struct data_reference *b,
it is executed whenever the loop is entered. */
static basic_block
-last_always_executed_block (struct loop *loop)
+last_always_executed_block (class loop *loop)
{
unsigned i;
vec<edge> exits = get_loop_exit_edges (loop);
@@ -752,7 +752,7 @@ last_always_executed_block (struct loop *loop)
/* Splits dependence graph on DATAREFS described by DEPENDS to components. */
static struct component *
-split_data_refs_to_components (struct loop *loop,
+split_data_refs_to_components (class loop *loop,
vec<data_reference_p> datarefs,
vec<ddr_p> depends)
{
@@ -896,7 +896,7 @@ split_data_refs_to_components (struct loop *loop,
comps[ca] = comp;
}
- dataref = XCNEW (struct dref_d);
+ dataref = XCNEW (class dref_d);
dataref->ref = dr;
dataref->stmt = DR_STMT (dr);
dataref->offset = 0;
@@ -931,7 +931,7 @@ end:
loop. */
static bool
-suitable_component_p (struct loop *loop, struct component *comp)
+suitable_component_p (class loop *loop, struct component *comp)
{
unsigned i;
dref a, first;
@@ -987,7 +987,7 @@ suitable_component_p (struct loop *loop, struct component *comp)
the beginning of this file. LOOP is the current loop. */
static struct component *
-filter_suitable_components (struct loop *loop, struct component *comps)
+filter_suitable_components (class loop *loop, struct component *comps)
{
struct component **comp, *act;
@@ -1232,7 +1232,7 @@ valid_initializer_p (struct data_reference *ref,
is the root of the current chain. */
static gphi *
-find_looparound_phi (struct loop *loop, dref ref, dref root)
+find_looparound_phi (class loop *loop, dref ref, dref root)
{
tree name, init, init_ref;
gphi *phi = NULL;
@@ -1296,7 +1296,7 @@ find_looparound_phi (struct loop *loop, dref ref, dref root)
static void
insert_looparound_copy (chain_p chain, dref ref, gphi *phi)
{
- dref nw = XCNEW (struct dref_d), aref;
+ dref nw = XCNEW (class dref_d), aref;
unsigned i;
nw->stmt = phi;
@@ -1321,7 +1321,7 @@ insert_looparound_copy (chain_p chain, dref ref, gphi *phi)
(also, it may allow us to combine chains together). */
static void
-add_looparound_copies (struct loop *loop, chain_p chain)
+add_looparound_copies (class loop *loop, chain_p chain)
{
unsigned i;
dref ref, root = get_chain_root (chain);
@@ -1346,7 +1346,7 @@ add_looparound_copies (struct loop *loop, chain_p chain)
loop. */
static void
-determine_roots_comp (struct loop *loop,
+determine_roots_comp (class loop *loop,
struct component *comp,
vec<chain_p> *chains)
{
@@ -1436,7 +1436,7 @@ determine_roots_comp (struct loop *loop,
separates the references to CHAINS. LOOP is the current loop. */
static void
-determine_roots (struct loop *loop,
+determine_roots (class loop *loop,
struct component *comps, vec<chain_p> *chains)
{
struct component *comp;
@@ -1653,7 +1653,7 @@ predcom_tmp_var (tree ref, unsigned i, bitmap tmp_vars)
temporary variables are marked in TMP_VARS. */
static void
-initialize_root_vars (struct loop *loop, chain_p chain, bitmap tmp_vars)
+initialize_root_vars (class loop *loop, chain_p chain, bitmap tmp_vars)
{
unsigned i;
unsigned n = chain->length;
@@ -1707,7 +1707,7 @@ initialize_root_vars (struct loop *loop, chain_p chain, bitmap tmp_vars)
In this case, we can use these invariant values directly after LOOP. */
static bool
-is_inv_store_elimination_chain (struct loop *loop, chain_p chain)
+is_inv_store_elimination_chain (class loop *loop, chain_p chain)
{
if (chain->length == 0 || chain->type != CT_STORE_STORE)
return false;
@@ -1801,7 +1801,7 @@ initialize_root_vars_store_elim_1 (chain_p chain)
of the newly created root variables are marked in TMP_VARS. */
static void
-initialize_root_vars_store_elim_2 (struct loop *loop,
+initialize_root_vars_store_elim_2 (class loop *loop,
chain_p chain, bitmap tmp_vars)
{
unsigned i, n = chain->length;
@@ -1886,7 +1886,7 @@ initialize_root_vars_store_elim_2 (struct loop *loop,
(CHAIN->length - 1) iterations. */
static void
-finalize_eliminated_stores (struct loop *loop, chain_p chain)
+finalize_eliminated_stores (class loop *loop, chain_p chain)
{
unsigned i, n = chain->length;
@@ -1914,7 +1914,7 @@ finalize_eliminated_stores (struct loop *loop, chain_p chain)
initializer. */
static void
-initialize_root_vars_lm (struct loop *loop, dref root, bool written,
+initialize_root_vars_lm (class loop *loop, dref root, bool written,
vec<tree> *vars, vec<tree> inits,
bitmap tmp_vars)
{
@@ -1962,7 +1962,7 @@ initialize_root_vars_lm (struct loop *loop, dref root, bool written,
created temporary variables are marked in TMP_VARS. */
static void
-execute_load_motion (struct loop *loop, chain_p chain, bitmap tmp_vars)
+execute_load_motion (class loop *loop, chain_p chain, bitmap tmp_vars)
{
auto_vec<tree> vars;
dref a;
@@ -2103,7 +2103,7 @@ remove_stmt (gimple *stmt)
Uids of the newly created temporary variables are marked in TMP_VARS.*/
static void
-execute_pred_commoning_chain (struct loop *loop, chain_p chain,
+execute_pred_commoning_chain (class loop *loop, chain_p chain,
bitmap tmp_vars)
{
unsigned i;
@@ -2234,7 +2234,7 @@ determine_unroll_factor (vec<chain_p> chains)
Uids of the newly created temporary variables are marked in TMP_VARS. */
static void
-execute_pred_commoning (struct loop *loop, vec<chain_p> chains,
+execute_pred_commoning (class loop *loop, vec<chain_p> chains,
bitmap tmp_vars)
{
chain_p chain;
@@ -2317,7 +2317,7 @@ struct epcc_data
};
static void
-execute_pred_commoning_cbck (struct loop *loop, void *data)
+execute_pred_commoning_cbck (class loop *loop, void *data)
{
struct epcc_data *const dta = (struct epcc_data *) data;
@@ -2333,7 +2333,7 @@ execute_pred_commoning_cbck (struct loop *loop, void *data)
the header of the LOOP. */
static void
-base_names_in_chain_on (struct loop *loop, tree name, tree var)
+base_names_in_chain_on (class loop *loop, tree name, tree var)
{
gimple *stmt, *phi;
imm_use_iterator iter;
@@ -2366,7 +2366,7 @@ base_names_in_chain_on (struct loop *loop, tree name, tree var)
for those we want to perform this. */
static void
-eliminate_temp_copies (struct loop *loop, bitmap tmp_vars)
+eliminate_temp_copies (class loop *loop, bitmap tmp_vars)
{
edge e;
gphi *phi;
@@ -2751,7 +2751,7 @@ combine_chains (chain_p ch1, chain_p ch2)
for (i = 0; (ch1->refs.iterate (i, &r1)
&& ch2->refs.iterate (i, &r2)); i++)
{
- nw = XCNEW (struct dref_d);
+ nw = XCNEW (class dref_d);
nw->stmt = stmt_combining_refs (r1, r2);
nw->distance = r1->distance;
@@ -2801,7 +2801,7 @@ pcom_stmt_dominates_stmt_p (gimple *s1, gimple *s2)
/* Try to combine the CHAINS in LOOP. */
static void
-try_combine_chains (struct loop *loop, vec<chain_p> *chains)
+try_combine_chains (class loop *loop, vec<chain_p> *chains)
{
unsigned i, j;
chain_p ch1, ch2, cch;
@@ -2911,7 +2911,7 @@ try_combine_chains (struct loop *loop, vec<chain_p> *chains)
otherwise. */
static bool
-prepare_initializers_chain_store_elim (struct loop *loop, chain_p chain)
+prepare_initializers_chain_store_elim (class loop *loop, chain_p chain)
{
unsigned i, n = chain->length;
@@ -2978,7 +2978,7 @@ prepare_initializers_chain_store_elim (struct loop *loop, chain_p chain)
impossible because one of these initializers may trap, true otherwise. */
static bool
-prepare_initializers_chain (struct loop *loop, chain_p chain)
+prepare_initializers_chain (class loop *loop, chain_p chain)
{
unsigned i, n = (chain->type == CT_INVARIANT) ? 1 : chain->length;
struct data_reference *dr = get_chain_root (chain)->ref;
@@ -3034,7 +3034,7 @@ prepare_initializers_chain (struct loop *loop, chain_p chain)
be used because the initializers might trap. */
static void
-prepare_initializers (struct loop *loop, vec<chain_p> chains)
+prepare_initializers (class loop *loop, vec<chain_p> chains)
{
chain_p chain;
unsigned i;
@@ -3056,7 +3056,7 @@ prepare_initializers (struct loop *loop, vec<chain_p> chains)
if finalizer code for CHAIN can be generated, otherwise false. */
static bool
-prepare_finalizers_chain (struct loop *loop, chain_p chain)
+prepare_finalizers_chain (class loop *loop, chain_p chain)
{
unsigned i, n = chain->length;
struct data_reference *dr = get_chain_root (chain)->ref;
@@ -3104,7 +3104,7 @@ prepare_finalizers_chain (struct loop *loop, chain_p chain)
if finalizer code generation for CHAINS breaks loop closed ssa form. */
static bool
-prepare_finalizers (struct loop *loop, vec<chain_p> chains)
+prepare_finalizers (class loop *loop, vec<chain_p> chains)
{
chain_p chain;
unsigned i;
@@ -3143,7 +3143,7 @@ prepare_finalizers (struct loop *loop, vec<chain_p> chains)
/* Insert all initializing gimple stmts into loop's entry edge. */
static void
-insert_init_seqs (struct loop *loop, vec<chain_p> chains)
+insert_init_seqs (class loop *loop, vec<chain_p> chains)
{
unsigned i;
edge entry = loop_preheader_edge (loop);
@@ -3161,14 +3161,14 @@ insert_init_seqs (struct loop *loop, vec<chain_p> chains)
form was corrupted. */
static unsigned
-tree_predictive_commoning_loop (struct loop *loop)
+tree_predictive_commoning_loop (class loop *loop)
{
vec<data_reference_p> datarefs;
vec<ddr_p> dependences;
struct component *components;
vec<chain_p> chains = vNULL;
unsigned unroll_factor;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unroll = false, loop_closed_ssa = false;
edge exit;
@@ -3304,7 +3304,7 @@ end: ;
unsigned
tree_predictive_commoning (void)
{
- struct loop *loop;
+ class loop *loop;
unsigned ret = 0, changed = 0;
initialize_original_copy_tables ();
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index 08c882edd6f..4b72a25d350 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -286,8 +286,8 @@ along with GCC; see the file COPYING3. If not see
#include "builtins.h"
#include "case-cfn-macros.h"
-static tree analyze_scalar_evolution_1 (struct loop *, tree);
-static tree analyze_scalar_evolution_for_address_of (struct loop *loop,
+static tree analyze_scalar_evolution_1 (class loop *, tree);
+static tree analyze_scalar_evolution_for_address_of (class loop *loop,
tree var);
/* The cached information about an SSA name with version NAME_VERSION,
@@ -445,7 +445,7 @@ loop_phi_node_p (gimple *phi)
*/
tree
-compute_overall_effect_of_inner_loop (struct loop *loop, tree evolution_fn)
+compute_overall_effect_of_inner_loop (class loop *loop, tree evolution_fn)
{
bool val = false;
@@ -454,7 +454,7 @@ compute_overall_effect_of_inner_loop (struct loop *loop, tree evolution_fn)
else if (TREE_CODE (evolution_fn) == POLYNOMIAL_CHREC)
{
- struct loop *inner_loop = get_chrec_loop (evolution_fn);
+ class loop *inner_loop = get_chrec_loop (evolution_fn);
if (inner_loop == loop
|| flow_loop_nested_p (loop, inner_loop))
@@ -593,7 +593,7 @@ add_to_evolution_1 (unsigned loop_nb, tree chrec_before, tree to_add,
gimple *at_stmt)
{
tree type, left, right;
- struct loop *loop = get_loop (cfun, loop_nb), *chloop;
+ class loop *loop = get_loop (cfun, loop_nb), *chloop;
switch (TREE_CODE (chrec_before))
{
@@ -840,7 +840,7 @@ add_to_evolution (unsigned loop_nb, tree chrec_before, enum tree_code code,
analyze, then give up. */
gcond *
-get_loop_exit_condition (const struct loop *loop)
+get_loop_exit_condition (const class loop *loop)
{
gcond *res = NULL;
edge exit_edge = single_exit (loop);
@@ -876,14 +876,14 @@ enum t_bool {
};
-static t_bool follow_ssa_edge (struct loop *loop, gimple *, gphi *,
+static t_bool follow_ssa_edge (class loop *loop, gimple *, gphi *,
tree *, int);
/* Follow the ssa edge into the binary expression RHS0 CODE RHS1.
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_binary (struct loop *loop, gimple *at_stmt,
+follow_ssa_edge_binary (class loop *loop, gimple *at_stmt,
tree type, tree rhs0, enum tree_code code, tree rhs1,
gphi *halting_phi, tree *evolution_of_loop,
int limit)
@@ -1018,7 +1018,7 @@ follow_ssa_edge_binary (struct loop *loop, gimple *at_stmt,
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_expr (struct loop *loop, gimple *at_stmt, tree expr,
+follow_ssa_edge_expr (class loop *loop, gimple *at_stmt, tree expr,
gphi *halting_phi, tree *evolution_of_loop,
int limit)
{
@@ -1109,7 +1109,7 @@ follow_ssa_edge_expr (struct loop *loop, gimple *at_stmt, tree expr,
Return true if the strongly connected component has been found. */
static t_bool
-follow_ssa_edge_in_rhs (struct loop *loop, gimple *stmt,
+follow_ssa_edge_in_rhs (class loop *loop, gimple *stmt,
gphi *halting_phi, tree *evolution_of_loop,
int limit)
{
@@ -1170,7 +1170,7 @@ backedge_phi_arg_p (gphi *phi, int i)
static inline t_bool
follow_ssa_edge_in_condition_phi_branch (int i,
- struct loop *loop,
+ class loop *loop,
gphi *condition_phi,
gphi *halting_phi,
tree *evolution_of_branch,
@@ -1205,7 +1205,7 @@ follow_ssa_edge_in_condition_phi_branch (int i,
loop. */
static t_bool
-follow_ssa_edge_in_condition_phi (struct loop *loop,
+follow_ssa_edge_in_condition_phi (class loop *loop,
gphi *condition_phi,
gphi *halting_phi,
tree *evolution_of_loop, int limit)
@@ -1252,12 +1252,12 @@ follow_ssa_edge_in_condition_phi (struct loop *loop,
considered as a single statement. */
static t_bool
-follow_ssa_edge_inner_loop_phi (struct loop *outer_loop,
+follow_ssa_edge_inner_loop_phi (class loop *outer_loop,
gphi *loop_phi_node,
gphi *halting_phi,
tree *evolution_of_loop, int limit)
{
- struct loop *loop = loop_containing_stmt (loop_phi_node);
+ class loop *loop = loop_containing_stmt (loop_phi_node);
tree ev = analyze_scalar_evolution (loop, PHI_RESULT (loop_phi_node));
/* Sometimes, the inner loop is too difficult to analyze, and the
@@ -1299,10 +1299,10 @@ follow_ssa_edge_inner_loop_phi (struct loop *outer_loop,
path that is analyzed on the return walk. */
static t_bool
-follow_ssa_edge (struct loop *loop, gimple *def, gphi *halting_phi,
+follow_ssa_edge (class loop *loop, gimple *def, gphi *halting_phi,
tree *evolution_of_loop, int limit)
{
- struct loop *def_loop;
+ class loop *def_loop;
if (gimple_nop_p (def))
return t_false;
@@ -1374,7 +1374,7 @@ follow_ssa_edge (struct loop *loop, gimple *def, gphi *halting_phi,
See PR41488. */
static tree
-simplify_peeled_chrec (struct loop *loop, tree arg, tree init_cond)
+simplify_peeled_chrec (class loop *loop, tree arg, tree init_cond)
{
aff_tree aff1, aff2;
tree ev, left, right, type, step_val;
@@ -1432,7 +1432,7 @@ analyze_evolution_in_loop (gphi *loop_phi_node,
{
int i, n = gimple_phi_num_args (loop_phi_node);
tree evolution_function = chrec_not_analyzed_yet;
- struct loop *loop = loop_containing_stmt (loop_phi_node);
+ class loop *loop = loop_containing_stmt (loop_phi_node);
basic_block bb;
static bool simplify_peeled_chrec_p = true;
@@ -1560,7 +1560,7 @@ analyze_initial_condition (gphi *loop_phi_node)
{
int i, n;
tree init_cond = chrec_not_analyzed_yet;
- struct loop *loop = loop_containing_stmt (loop_phi_node);
+ class loop *loop = loop_containing_stmt (loop_phi_node);
if (dump_file && (dump_flags & TDF_SCEV))
{
@@ -1617,10 +1617,10 @@ analyze_initial_condition (gphi *loop_phi_node)
/* Analyze the scalar evolution for LOOP_PHI_NODE. */
static tree
-interpret_loop_phi (struct loop *loop, gphi *loop_phi_node)
+interpret_loop_phi (class loop *loop, gphi *loop_phi_node)
{
tree res;
- struct loop *phi_loop = loop_containing_stmt (loop_phi_node);
+ class loop *phi_loop = loop_containing_stmt (loop_phi_node);
tree init_cond;
gcc_assert (phi_loop == loop);
@@ -1654,7 +1654,7 @@ interpret_loop_phi (struct loop *loop, gphi *loop_phi_node)
analyzed. */
static tree
-interpret_condition_phi (struct loop *loop, gphi *condition_phi)
+interpret_condition_phi (class loop *loop, gphi *condition_phi)
{
int i, n = gimple_phi_num_args (condition_phi);
tree res = chrec_not_analyzed_yet;
@@ -1688,7 +1688,7 @@ interpret_condition_phi (struct loop *loop, gphi *condition_phi)
analyze the effect of an inner loop: see interpret_loop_phi. */
static tree
-interpret_rhs_expr (struct loop *loop, gimple *at_stmt,
+interpret_rhs_expr (class loop *loop, gimple *at_stmt,
tree type, tree rhs1, enum tree_code code, tree rhs2)
{
tree res, chrec1, chrec2, ctype;
@@ -1958,7 +1958,7 @@ interpret_rhs_expr (struct loop *loop, gimple *at_stmt,
/* Interpret the expression EXPR. */
static tree
-interpret_expr (struct loop *loop, gimple *at_stmt, tree expr)
+interpret_expr (class loop *loop, gimple *at_stmt, tree expr)
{
enum tree_code code;
tree type = TREE_TYPE (expr), op0, op1;
@@ -1980,7 +1980,7 @@ interpret_expr (struct loop *loop, gimple *at_stmt, tree expr)
/* Interpret the rhs of the assignment STMT. */
static tree
-interpret_gimple_assign (struct loop *loop, gimple *stmt)
+interpret_gimple_assign (class loop *loop, gimple *stmt)
{
tree type = TREE_TYPE (gimple_assign_lhs (stmt));
enum tree_code code = gimple_assign_rhs_code (stmt);
@@ -2001,11 +2001,11 @@ interpret_gimple_assign (struct loop *loop, gimple *stmt)
/* Helper recursive function. */
static tree
-analyze_scalar_evolution_1 (struct loop *loop, tree var)
+analyze_scalar_evolution_1 (class loop *loop, tree var)
{
gimple *def;
basic_block bb;
- struct loop *def_loop;
+ class loop *def_loop;
tree res;
if (TREE_CODE (var) != SSA_NAME)
@@ -2025,7 +2025,7 @@ analyze_scalar_evolution_1 (struct loop *loop, tree var)
if (loop != def_loop)
{
res = analyze_scalar_evolution_1 (def_loop, var);
- struct loop *loop_to_skip = superloop_at_depth (def_loop,
+ class loop *loop_to_skip = superloop_at_depth (def_loop,
loop_depth (loop) + 1);
res = compute_overall_effect_of_inner_loop (loop_to_skip, res);
if (chrec_contains_symbols_defined_in_loop (res, loop->num))
@@ -2077,7 +2077,7 @@ analyze_scalar_evolution_1 (struct loop *loop, tree var)
*/
tree
-analyze_scalar_evolution (struct loop *loop, tree var)
+analyze_scalar_evolution (class loop *loop, tree var)
{
tree res;
@@ -2122,7 +2122,7 @@ analyze_scalar_evolution (struct loop *loop, tree var)
/* Analyzes and returns the scalar evolution of VAR address in LOOP. */
static tree
-analyze_scalar_evolution_for_address_of (struct loop *loop, tree var)
+analyze_scalar_evolution_for_address_of (class loop *loop, tree var)
{
return analyze_scalar_evolution (loop, build_fold_addr_expr (var));
}
@@ -2178,7 +2178,7 @@ analyze_scalar_evolution_for_address_of (struct loop *loop, tree var)
*/
static tree
-analyze_scalar_evolution_in_loop (struct loop *wrto_loop, struct loop *use_loop,
+analyze_scalar_evolution_in_loop (class loop *wrto_loop, class loop *use_loop,
tree version, bool *folded_casts)
{
bool val = false;
@@ -2278,7 +2278,7 @@ get_instantiated_value_entry (instantiate_cache_type &cache,
static tree
loop_closed_phi_def (tree var)
{
- struct loop *loop;
+ class loop *loop;
edge exit;
gphi *phi;
gphi_iterator psi;
@@ -2302,7 +2302,7 @@ loop_closed_phi_def (tree var)
return NULL_TREE;
}
-static tree instantiate_scev_r (edge, struct loop *, struct loop *,
+static tree instantiate_scev_r (edge, class loop *, class loop *,
tree, bool *, int);
/* Analyze all the parameters of the chrec, between INSTANTIATE_BELOW
@@ -2322,13 +2322,13 @@ static tree instantiate_scev_r (edge, struct loop *, struct loop *,
static tree
instantiate_scev_name (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec,
bool *fold_conversions,
int size_expr)
{
tree res;
- struct loop *def_loop;
+ class loop *def_loop;
basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (chrec));
/* A parameter, nothing to do. */
@@ -2472,7 +2472,7 @@ instantiate_scev_name (edge instantiate_below,
static tree
instantiate_scev_poly (edge instantiate_below,
- struct loop *evolution_loop, struct loop *,
+ class loop *evolution_loop, class loop *,
tree chrec, bool *fold_conversions, int size_expr)
{
tree op1;
@@ -2517,7 +2517,7 @@ instantiate_scev_poly (edge instantiate_below,
static tree
instantiate_scev_binary (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec, enum tree_code code,
tree type, tree c0, tree c1,
bool *fold_conversions, int size_expr)
@@ -2585,7 +2585,7 @@ instantiate_scev_binary (edge instantiate_below,
static tree
instantiate_scev_convert (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec, tree type, tree op,
bool *fold_conversions, int size_expr)
{
@@ -2636,7 +2636,7 @@ instantiate_scev_convert (edge instantiate_below,
static tree
instantiate_scev_not (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec,
enum tree_code code, tree type, tree op,
bool *fold_conversions, int size_expr)
@@ -2687,7 +2687,7 @@ instantiate_scev_not (edge instantiate_below,
static tree
instantiate_scev_r (edge instantiate_below,
- struct loop *evolution_loop, struct loop *inner_loop,
+ class loop *evolution_loop, class loop *inner_loop,
tree chrec,
bool *fold_conversions, int size_expr)
{
@@ -2761,7 +2761,7 @@ instantiate_scev_r (edge instantiate_below,
a function parameter. */
tree
-instantiate_scev (edge instantiate_below, struct loop *evolution_loop,
+instantiate_scev (edge instantiate_below, class loop *evolution_loop,
tree chrec)
{
tree res;
@@ -2810,7 +2810,7 @@ instantiate_scev (edge instantiate_below, struct loop *evolution_loop,
of an expression. */
tree
-resolve_mixers (struct loop *loop, tree chrec, bool *folded_casts)
+resolve_mixers (class loop *loop, tree chrec, bool *folded_casts)
{
bool destr = false;
bool fold_conversions = false;
@@ -2859,10 +2859,10 @@ resolve_mixers (struct loop *loop, tree chrec, bool *folded_casts)
the loop body has been executed 6 times. */
tree
-number_of_latch_executions (struct loop *loop)
+number_of_latch_executions (class loop *loop)
{
edge exit;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
tree may_be_zero;
tree res;
@@ -3047,7 +3047,7 @@ gather_stats_on_scev_database (void)
void
scev_initialize (void)
{
- struct loop *loop;
+ class loop *loop;
gcc_assert (! scev_initialized_p ());
@@ -3085,7 +3085,7 @@ scev_reset_htab (void)
void
scev_reset (void)
{
- struct loop *loop;
+ class loop *loop;
scev_reset_htab ();
@@ -3104,7 +3104,7 @@ scev_reset (void)
hypothetical IVs to be inserted into code. */
bool
-iv_can_overflow_p (struct loop *loop, tree type, tree base, tree step)
+iv_can_overflow_p (class loop *loop, tree type, tree base, tree step)
{
widest_int nit;
wide_int base_min, base_max, step_min, step_max, type_min, type_max;
@@ -3267,7 +3267,7 @@ derive_simple_iv_with_niters (tree ev, tree *niters)
infinite. */
bool
-simple_iv_with_niters (struct loop *wrto_loop, struct loop *use_loop,
+simple_iv_with_niters (class loop *wrto_loop, class loop *use_loop,
tree op, affine_iv *iv, tree *iv_niters,
bool allow_nonconstant_step)
{
@@ -3407,7 +3407,7 @@ simple_iv_with_niters (struct loop *wrto_loop, struct loop *use_loop,
affine iv unconditionally. */
bool
-simple_iv (struct loop *wrto_loop, struct loop *use_loop, tree op,
+simple_iv (class loop *wrto_loop, class loop *use_loop, tree op,
affine_iv *iv, bool allow_nonconstant_step)
{
return simple_iv_with_niters (wrto_loop, use_loop, op, iv,
@@ -3565,7 +3565,7 @@ expression_expensive_p (tree expr)
/* Do final value replacement for LOOP, return true if we did anything. */
bool
-final_value_replacement_loop (struct loop *loop)
+final_value_replacement_loop (class loop *loop)
{
/* If we do not know exact number of iterations of the loop, we cannot
replace the final value. */
@@ -3584,7 +3584,7 @@ final_value_replacement_loop (struct loop *loop)
/* Set stmt insertion pointer. All stmts are inserted before this point. */
gimple_stmt_iterator gsi = gsi_after_labels (exit->dest);
- struct loop *ex_loop
+ class loop *ex_loop
= superloop_at_depth (loop,
loop_depth (exit->dest->loop_father) + 1);
diff --git a/gcc/tree-scalar-evolution.h b/gcc/tree-scalar-evolution.h
index 621a57c9e2d..d4d6ec58e53 100644
--- a/gcc/tree-scalar-evolution.h
+++ b/gcc/tree-scalar-evolution.h
@@ -21,27 +21,27 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_SCALAR_EVOLUTION_H
#define GCC_TREE_SCALAR_EVOLUTION_H
-extern tree number_of_latch_executions (struct loop *);
-extern gcond *get_loop_exit_condition (const struct loop *);
+extern tree number_of_latch_executions (class loop *);
+extern gcond *get_loop_exit_condition (const class loop *);
extern void scev_initialize (void);
extern bool scev_initialized_p (void);
extern void scev_reset (void);
extern void scev_reset_htab (void);
extern void scev_finalize (void);
-extern tree analyze_scalar_evolution (struct loop *, tree);
-extern tree instantiate_scev (edge, struct loop *, tree);
-extern tree resolve_mixers (struct loop *, tree, bool *);
+extern tree analyze_scalar_evolution (class loop *, tree);
+extern tree instantiate_scev (edge, class loop *, tree);
+extern tree resolve_mixers (class loop *, tree, bool *);
extern void gather_stats_on_scev_database (void);
-extern bool final_value_replacement_loop (struct loop *);
+extern bool final_value_replacement_loop (class loop *);
extern unsigned int scev_const_prop (void);
extern bool expression_expensive_p (tree);
-extern bool simple_iv_with_niters (struct loop *, struct loop *, tree,
+extern bool simple_iv_with_niters (class loop *, class loop *, tree,
struct affine_iv *, tree *, bool);
-extern bool simple_iv (struct loop *, struct loop *, tree, struct affine_iv *,
+extern bool simple_iv (class loop *, class loop *, tree, struct affine_iv *,
bool);
-extern bool iv_can_overflow_p (struct loop *, tree, tree, tree);
-extern tree compute_overall_effect_of_inner_loop (struct loop *, tree);
+extern bool iv_can_overflow_p (class loop *, tree, tree, tree);
+extern tree compute_overall_effect_of_inner_loop (class loop *, tree);
/* Returns the basic block preceding LOOP, or the CFG entry block when
the loop is function's body. */
@@ -58,14 +58,14 @@ block_before_loop (loop_p loop)
be analyzed and instantiated. */
static inline tree
-instantiate_parameters (struct loop *loop, tree chrec)
+instantiate_parameters (class loop *loop, tree chrec)
{
return instantiate_scev (loop_preheader_edge (loop), loop, chrec);
}
/* Returns the loop of the polynomial chrec CHREC. */
-static inline struct loop *
+static inline class loop *
get_chrec_loop (const_tree chrec)
{
return get_loop (cfun, CHREC_VARIABLE (chrec));
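For reference, a minimal C++ sketch (hypothetical names, not GCC code) of why the class-key in these declarations is interchangeable: an elaborated-type-specifier or forward declaration may use either key, and the patch merely normalizes headers such as the one above to say class loop consistently so that declaration and definition agree.

  // Definition uses 'class'; members are made public explicitly.
  class loop_like { public: int num; };

  // A redeclaration with 'struct' names the same type -- valid C++,
  // but the mixed keys are what mismatched-tags style warnings flag.
  struct loop_like;

  // Consistent redeclaration, as the headers now spell it.
  class loop_like;

  // In a parameter, the key is likewise cosmetic: both lines declare
  // the same function.
  int loop_depth_of (class loop_like *);
  int loop_depth_of (struct loop_like *);

  int main () { class loop_like l; l.num = 0; return l.num; }

Either spelling compiles to the same thing; only the written key changes.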
diff --git a/gcc/tree-ssa-address.h b/gcc/tree-ssa-address.h
index 9812f36fbcb..05a23212488 100644
--- a/gcc/tree-ssa-address.h
+++ b/gcc/tree-ssa-address.h
@@ -33,9 +33,9 @@ extern void get_address_description (tree, struct mem_address *);
extern tree tree_mem_ref_addr (tree, tree);
extern bool valid_mem_ref_p (machine_mode, addr_space_t, struct mem_address *);
extern void move_fixed_address_to_symbol (struct mem_address *,
- struct aff_tree *);
+ class aff_tree *);
tree create_mem_ref (gimple_stmt_iterator *, tree,
- struct aff_tree *, tree, tree, tree, bool);
+ class aff_tree *, tree, tree, tree, bool);
extern void copy_ref_info (tree, tree);
tree maybe_fold_tmr (tree);
diff --git a/gcc/tree-ssa-dce.c b/gcc/tree-ssa-dce.c
index c73fbabfe29..6398c1e4457 100644
--- a/gcc/tree-ssa-dce.c
+++ b/gcc/tree-ssa-dce.c
@@ -411,7 +411,7 @@ find_obviously_necessary_stmts (bool aggressive)
/* Prevent the empty possibly infinite loops from being removed. */
if (aggressive)
{
- struct loop *loop;
+ class loop *loop;
if (mark_irreducible_loops ())
FOR_EACH_BB_FN (bb, cfun)
{
diff --git a/gcc/tree-ssa-dom.c b/gcc/tree-ssa-dom.c
index 17c852d5299..2d0386670e6 100644
--- a/gcc/tree-ssa-dom.c
+++ b/gcc/tree-ssa-dom.c
@@ -395,7 +395,7 @@ edge_info::record_simple_equiv (tree lhs, tree rhs)
void
free_dom_edge_info (edge e)
{
- class edge_info *edge_info = (struct edge_info *)e->aux;
+ class edge_info *edge_info = (class edge_info *)e->aux;
if (edge_info)
delete edge_info;
@@ -543,7 +543,7 @@ record_edge_info (basic_block bb)
bool can_infer_simple_equiv
= !(HONOR_SIGNED_ZEROS (op0)
&& real_zerop (op0));
- struct edge_info *edge_info;
+ class edge_info *edge_info;
edge_info = new class edge_info (true_edge);
record_conditions (&edge_info->cond_equivalences, cond, inverted);
@@ -567,7 +567,7 @@ record_edge_info (basic_block bb)
bool can_infer_simple_equiv
= !(HONOR_SIGNED_ZEROS (op1)
&& (TREE_CODE (op1) == SSA_NAME || real_zerop (op1)));
- struct edge_info *edge_info;
+ class edge_info *edge_info;
edge_info = new class edge_info (true_edge);
record_conditions (&edge_info->cond_equivalences, cond, inverted);
diff --git a/gcc/tree-ssa-live.c b/gcc/tree-ssa-live.c
index e9ae8e0cf75..99087946c3e 100644
--- a/gcc/tree-ssa-live.c
+++ b/gcc/tree-ssa-live.c
@@ -79,7 +79,7 @@ var_map_base_fini (var_map map)
function. */
var_map
-init_var_map (int size, struct loop *loop)
+init_var_map (int size, class loop *loop)
{
var_map map;
@@ -852,7 +852,7 @@ remove_unused_locals (void)
if (cfun->has_simduid_loops)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
if (loop->simduid && !is_used_p (loop->simduid))
loop->simduid = NULL_TREE;
diff --git a/gcc/tree-ssa-live.h b/gcc/tree-ssa-live.h
index 78b033b8ee9..5bef4d5d767 100644
--- a/gcc/tree-ssa-live.h
+++ b/gcc/tree-ssa-live.h
@@ -80,7 +80,7 @@ typedef struct _var_map
/* Value used to represent no partition number. */
#define NO_PARTITION -1
-extern var_map init_var_map (int, struct loop* = NULL);
+extern var_map init_var_map (int, class loop* = NULL);
extern void delete_var_map (var_map);
extern int var_union (var_map, tree, tree);
extern void partition_view_normal (var_map);
diff --git a/gcc/tree-ssa-loop-ch.c b/gcc/tree-ssa-loop-ch.c
index 25f562b0cec..d92d7c85690 100644
--- a/gcc/tree-ssa-loop-ch.c
+++ b/gcc/tree-ssa-loop-ch.c
@@ -48,7 +48,7 @@ along with GCC; see the file COPYING3. If not see
amount. */
static bool
-should_duplicate_loop_header_p (basic_block header, struct loop *loop,
+should_duplicate_loop_header_p (basic_block header, class loop *loop,
int *limit)
{
gimple_stmt_iterator bsi;
@@ -211,7 +211,7 @@ should_duplicate_loop_header_p (basic_block header, struct loop *loop,
/* Checks whether LOOP is a do-while style loop. */
static bool
-do_while_loop_p (struct loop *loop)
+do_while_loop_p (class loop *loop)
{
gimple *stmt = last_stmt (loop->latch);
@@ -268,7 +268,7 @@ class ch_base : public gimple_opt_pass
unsigned int copy_headers (function *fun);
/* Return true to copy headers of LOOP or false to skip. */
- virtual bool process_loop_p (struct loop *loop) = 0;
+ virtual bool process_loop_p (class loop *loop) = 0;
};
const pass_data pass_data_ch =
@@ -301,7 +301,7 @@ public:
protected:
/* ch_base method: */
- virtual bool process_loop_p (struct loop *loop);
+ virtual bool process_loop_p (class loop *loop);
}; // class pass_ch
const pass_data pass_data_ch_vect =
@@ -339,7 +339,7 @@ public:
protected:
/* ch_base method: */
- virtual bool process_loop_p (struct loop *loop);
+ virtual bool process_loop_p (class loop *loop);
}; // class pass_ch_vect
/* For all loops, copy the condition at the end of the loop body in front
@@ -349,7 +349,7 @@ protected:
unsigned int
ch_base::copy_headers (function *fun)
{
- struct loop *loop;
+ class loop *loop;
basic_block header;
edge exit, entry;
basic_block *bbs, *copied_bbs;
@@ -549,7 +549,7 @@ pass_ch_vect::execute (function *fun)
/* Apply header copying according to a very simple test of do-while shape. */
bool
-pass_ch::process_loop_p (struct loop *loop)
+pass_ch::process_loop_p (class loop *loop)
{
return !do_while_loop_p (loop);
}
@@ -557,7 +557,7 @@ pass_ch::process_loop_p (struct loop *loop)
/* Apply header-copying to loops where we might enable vectorization. */
bool
-pass_ch_vect::process_loop_p (struct loop *loop)
+pass_ch_vect::process_loop_p (class loop *loop)
{
if (!flag_tree_loop_vectorize && !loop->force_vectorize)
return false;
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index f8bcabc0aa5..12176e07636 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -75,13 +75,13 @@ along with GCC; see the file COPYING3. If not see
struct lim_aux_data
{
- struct loop *max_loop; /* The outermost loop in that the statement
+ class loop *max_loop; /* The outermost loop in that the statement
is invariant. */
- struct loop *tgt_loop; /* The loop out of that we want to move the
+ class loop *tgt_loop; /* The loop out of that we want to move the
invariant. */
- struct loop *always_executed_in;
+ class loop *always_executed_in;
/* The outermost loop for that we are sure
the statement is executed if the loop
is entered. */
@@ -160,7 +160,7 @@ struct mem_ref_hasher : nofree_ptr_hash <im_mem_ref>
static inline bool equal (const im_mem_ref *, const ao_ref *);
};
-/* A hash function for struct im_mem_ref object OBJ. */
+/* A hash function for class im_mem_ref object OBJ. */
inline hashval_t
mem_ref_hasher::hash (const im_mem_ref *mem)
@@ -168,7 +168,7 @@ mem_ref_hasher::hash (const im_mem_ref *mem)
return mem->hash;
}
-/* An equality function for struct im_mem_ref object MEM1 with
+/* An equality function for class im_mem_ref object MEM1 with
memory reference OBJ2. */
inline bool
@@ -226,15 +226,15 @@ static struct
static bitmap_obstack lim_bitmap_obstack;
static obstack mem_ref_obstack;
-static bool ref_indep_loop_p (struct loop *, im_mem_ref *);
-static bool ref_always_accessed_p (struct loop *, im_mem_ref *, bool);
+static bool ref_indep_loop_p (class loop *, im_mem_ref *);
+static bool ref_always_accessed_p (class loop *, im_mem_ref *, bool);
/* Minimum cost of an expensive expression. */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))
/* The outermost loop for which execution of the header guarantees that the
block will be executed. */
-#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)
+#define ALWAYS_EXECUTED_IN(BB) ((class loop *) (BB)->aux)
#define SET_ALWAYS_EXECUTED_IN(BB, VAL) ((BB)->aux = (void *) (VAL))
/* ID of the shared unanalyzable mem. */
@@ -396,12 +396,12 @@ movement_possibility (gimple *stmt)
other operands, i.e. the outermost loop enclosing LOOP in that the value
of DEF is invariant. */
-static struct loop *
-outermost_invariant_loop (tree def, struct loop *loop)
+static class loop *
+outermost_invariant_loop (tree def, class loop *loop)
{
gimple *def_stmt;
basic_block def_bb;
- struct loop *max_loop;
+ class loop *max_loop;
struct lim_aux_data *lim_data;
if (!def)
@@ -444,12 +444,12 @@ outermost_invariant_loop (tree def, struct loop *loop)
If DEF is not invariant in LOOP, return false. Otherwise return TRUE. */
static bool
-add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
+add_dependency (tree def, struct lim_aux_data *data, class loop *loop,
bool add_cost)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (def);
basic_block def_bb = gimple_bb (def_stmt);
- struct loop *max_loop;
+ class loop *max_loop;
struct lim_aux_data *def_data;
if (!def_bb)
@@ -560,10 +560,10 @@ stmt_cost (gimple *stmt)
REF is independent. If REF is not independent in LOOP, NULL is returned
instead. */
-static struct loop *
-outermost_indep_loop (struct loop *outer, struct loop *loop, im_mem_ref *ref)
+static class loop *
+outermost_indep_loop (class loop *outer, class loop *loop, im_mem_ref *ref)
{
- struct loop *aloop;
+ class loop *aloop;
if (ref->stored && bitmap_bit_p (ref->stored, loop->num))
return NULL;
@@ -649,8 +649,8 @@ static bool
determine_max_movement (gimple *stmt, bool must_preserve_exec)
{
basic_block bb = gimple_bb (stmt);
- struct loop *loop = bb->loop_father;
- struct loop *level;
+ class loop *loop = bb->loop_father;
+ class loop *level;
struct lim_aux_data *lim_data = get_lim_data (stmt);
tree val;
ssa_op_iter iter;
@@ -777,9 +777,9 @@ determine_max_movement (gimple *stmt, bool must_preserve_exec)
operands) is hoisted at least out of the loop LEVEL. */
static void
-set_level (gimple *stmt, struct loop *orig_loop, struct loop *level)
+set_level (gimple *stmt, class loop *orig_loop, class loop *level)
{
- struct loop *stmt_loop = gimple_bb (stmt)->loop_father;
+ class loop *stmt_loop = gimple_bb (stmt)->loop_father;
struct lim_aux_data *lim_data;
gimple *dep_stmt;
unsigned i;
@@ -974,7 +974,7 @@ invariantness_dom_walker::before_dom_children (basic_block bb)
gimple_stmt_iterator bsi;
gimple *stmt;
bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
- struct loop *outermost = ALWAYS_EXECUTED_IN (bb);
+ class loop *outermost = ALWAYS_EXECUTED_IN (bb);
struct lim_aux_data *lim_data;
if (!loop_outer (bb->loop_father))
@@ -1053,7 +1053,7 @@ invariantness_dom_walker::before_dom_children (basic_block bb)
{
tree op0 = gimple_assign_rhs1 (stmt);
tree op1 = gimple_assign_rhs2 (stmt);
- struct loop *ol1 = outermost_invariant_loop (op1,
+ class loop *ol1 = outermost_invariant_loop (op1,
loop_containing_stmt (stmt));
/* If divisor is invariant, convert a/b to a*(1/b), allowing reciprocal
@@ -1112,7 +1112,7 @@ invariantness_dom_walker::before_dom_children (basic_block bb)
unsigned int
move_computations_worker (basic_block bb)
{
- struct loop *level;
+ class loop *level;
unsigned cost = 0;
struct lim_aux_data *lim_data;
unsigned int todo = 0;
@@ -1296,7 +1296,7 @@ move_computations (void)
static bool
may_move_till (tree ref, tree *index, void *data)
{
- struct loop *loop = (struct loop *) data, *max_loop;
+ class loop *loop = (class loop *) data, *max_loop;
/* If REF is an array reference, check also that the step and the lower
bound is invariant in LOOP. */
@@ -1325,7 +1325,7 @@ may_move_till (tree ref, tree *index, void *data)
moved out of the LOOP. ORIG_LOOP is the loop in that EXPR is used. */
static void
-force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
+force_move_till_op (tree op, class loop *orig_loop, class loop *loop)
{
gimple *stmt;
@@ -1348,8 +1348,8 @@ force_move_till_op (tree op, struct loop *orig_loop, struct loop *loop)
struct fmt_data
{
- struct loop *loop;
- struct loop *orig_loop;
+ class loop *loop;
+ class loop *orig_loop;
};
static bool
@@ -1374,7 +1374,7 @@ force_move_till (tree ref, tree *index, void *data)
/* A function to free the mem_ref object OBJ. */
static void
-memref_free (struct im_mem_ref *mem)
+memref_free (class im_mem_ref *mem)
{
mem->accesses_in_loop.release ();
}
@@ -1385,7 +1385,7 @@ memref_free (struct im_mem_ref *mem)
static im_mem_ref *
mem_ref_alloc (ao_ref *mem, unsigned hash, unsigned id)
{
- im_mem_ref *ref = XOBNEW (&mem_ref_obstack, struct im_mem_ref);
+ im_mem_ref *ref = XOBNEW (&mem_ref_obstack, class im_mem_ref);
if (mem)
ref->mem = *mem;
else
@@ -1418,7 +1418,7 @@ record_mem_ref_loc (im_mem_ref *ref, gimple *stmt, tree *loc)
necessary. Return whether a bit was changed. */
static bool
-set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
+set_ref_stored_in_loop (im_mem_ref *ref, class loop *loop)
{
if (!ref->stored)
ref->stored = BITMAP_ALLOC (&lim_bitmap_obstack);
@@ -1428,7 +1428,7 @@ set_ref_stored_in_loop (im_mem_ref *ref, struct loop *loop)
/* Marks reference REF as stored in LOOP. */
static void
-mark_ref_stored (im_mem_ref *ref, struct loop *loop)
+mark_ref_stored (im_mem_ref *ref, class loop *loop)
{
while (loop != current_loops->tree_root
&& set_ref_stored_in_loop (ref, loop))
@@ -1441,7 +1441,7 @@ mark_ref_stored (im_mem_ref *ref, struct loop *loop)
well. */
static void
-gather_mem_refs_stmt (struct loop *loop, gimple *stmt)
+gather_mem_refs_stmt (class loop *loop, gimple *stmt)
{
tree *mem = NULL;
hashval_t hash;
@@ -1583,8 +1583,8 @@ sort_bbs_in_loop_postorder_cmp (const void *bb1_, const void *bb2_)
{
basic_block bb1 = *(basic_block *)const_cast<void *>(bb1_);
basic_block bb2 = *(basic_block *)const_cast<void *>(bb2_);
- struct loop *loop1 = bb1->loop_father;
- struct loop *loop2 = bb2->loop_father;
+ class loop *loop1 = bb1->loop_father;
+ class loop *loop2 = bb2->loop_father;
if (loop1->num == loop2->num)
return bb1->index - bb2->index;
return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
@@ -1597,8 +1597,8 @@ sort_locs_in_loop_postorder_cmp (const void *loc1_, const void *loc2_)
{
mem_ref_loc *loc1 = (mem_ref_loc *)const_cast<void *>(loc1_);
mem_ref_loc *loc2 = (mem_ref_loc *)const_cast<void *>(loc2_);
- struct loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
- struct loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
+ class loop *loop1 = gimple_bb (loc1->stmt)->loop_father;
+ class loop *loop2 = gimple_bb (loc2->stmt)->loop_father;
if (loop1->num == loop2->num)
return 0;
return bb_loop_postorder[loop1->num] < bb_loop_postorder[loop2->num] ? -1 : 1;
@@ -1611,7 +1611,7 @@ analyze_memory_references (void)
{
gimple_stmt_iterator bsi;
basic_block bb, *bbs;
- struct loop *loop, *outer;
+ class loop *loop, *outer;
unsigned i, n;
/* Collect all basic-blocks in loops and sort them after their
@@ -1702,9 +1702,9 @@ mem_refs_may_alias_p (im_mem_ref *mem1, im_mem_ref *mem2,
static int
find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
{
- struct loop *loop = (struct loop *)const_cast<void *>(loop_);
+ class loop *loop = (class loop *)const_cast<void *>(loop_);
mem_ref_loc *loc = (mem_ref_loc *)const_cast<void *>(loc_);
- struct loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
+ class loop *loc_loop = gimple_bb (loc->stmt)->loop_father;
if (loop->num == loc_loop->num
|| flow_loop_nested_p (loop, loc_loop))
return 0;
@@ -1719,7 +1719,7 @@ find_ref_loc_in_loop_cmp (const void *loop_, const void *loc_)
template <typename FN>
static bool
-for_all_locs_in_loop (struct loop *loop, im_mem_ref *ref, FN fn)
+for_all_locs_in_loop (class loop *loop, im_mem_ref *ref, FN fn)
{
unsigned i;
mem_ref_loc *loc;
@@ -1776,7 +1776,7 @@ rewrite_mem_ref_loc::operator () (mem_ref_loc *loc)
/* Rewrites all references to REF in LOOP by variable TMP_VAR. */
static void
-rewrite_mem_refs (struct loop *loop, im_mem_ref *ref, tree tmp_var)
+rewrite_mem_refs (class loop *loop, im_mem_ref *ref, tree tmp_var)
{
for_all_locs_in_loop (loop, ref, rewrite_mem_ref_loc (tmp_var));
}
@@ -1801,7 +1801,7 @@ first_mem_ref_loc_1::operator () (mem_ref_loc *loc)
/* Returns the first reference location to REF in LOOP. */
static mem_ref_loc *
-first_mem_ref_loc (struct loop *loop, im_mem_ref *ref)
+first_mem_ref_loc (class loop *loop, im_mem_ref *ref)
{
mem_ref_loc *locp = NULL;
for_all_locs_in_loop (loop, ref, first_mem_ref_loc_1 (&locp));
@@ -2049,7 +2049,7 @@ sm_set_flag_if_changed::operator () (mem_ref_loc *loc)
set, set an appropriate flag indicating the store. */
static tree
-execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref,
+execute_sm_if_changed_flag_set (class loop *loop, im_mem_ref *ref,
hash_set <basic_block> *bbs)
{
tree flag;
@@ -2065,7 +2065,7 @@ execute_sm_if_changed_flag_set (struct loop *loop, im_mem_ref *ref,
to the reference from the temporary variable are emitted to exits. */
static void
-execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
+execute_sm (class loop *loop, vec<edge> exits, im_mem_ref *ref)
{
tree tmp_var, store_flag = NULL_TREE;
unsigned i;
@@ -2141,7 +2141,7 @@ execute_sm (struct loop *loop, vec<edge> exits, im_mem_ref *ref)
edges of the LOOP. */
static void
-hoist_memory_references (struct loop *loop, bitmap mem_refs,
+hoist_memory_references (class loop *loop, bitmap mem_refs,
vec<edge> exits)
{
im_mem_ref *ref;
@@ -2158,17 +2158,17 @@ hoist_memory_references (struct loop *loop, bitmap mem_refs,
class ref_always_accessed
{
public:
- ref_always_accessed (struct loop *loop_, bool stored_p_)
+ ref_always_accessed (class loop *loop_, bool stored_p_)
: loop (loop_), stored_p (stored_p_) {}
bool operator () (mem_ref_loc *loc);
- struct loop *loop;
+ class loop *loop;
bool stored_p;
};
bool
ref_always_accessed::operator () (mem_ref_loc *loc)
{
- struct loop *must_exec;
+ class loop *must_exec;
if (!get_lim_data (loc->stmt))
return false;
@@ -2198,7 +2198,7 @@ ref_always_accessed::operator () (mem_ref_loc *loc)
make sure REF is always stored to in LOOP. */
static bool
-ref_always_accessed_p (struct loop *loop, im_mem_ref *ref, bool stored_p)
+ref_always_accessed_p (class loop *loop, im_mem_ref *ref, bool stored_p)
{
return for_all_locs_in_loop (loop, ref,
ref_always_accessed (loop, stored_p));
@@ -2234,7 +2234,7 @@ refs_independent_p (im_mem_ref *ref1, im_mem_ref *ref2)
and its super-loops. */
static void
-record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
+record_dep_loop (class loop *loop, im_mem_ref *ref, bool stored_p)
{
/* We can propagate dependent-in-loop bits up the loop
hierarchy to all outer loops. */
@@ -2247,7 +2247,7 @@ record_dep_loop (struct loop *loop, im_mem_ref *ref, bool stored_p)
references in LOOP. */
static bool
-ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
+ref_indep_loop_p_1 (class loop *loop, im_mem_ref *ref, bool stored_p)
{
stored_p |= (ref->stored && bitmap_bit_p (ref->stored, loop->num));
@@ -2268,7 +2268,7 @@ ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
if (bitmap_bit_p (&ref->dep_loop, LOOP_DEP_BIT (loop->num, stored_p)))
return false;
- struct loop *inner = loop->inner;
+ class loop *inner = loop->inner;
while (inner)
{
if (!ref_indep_loop_p_1 (inner, ref, stored_p))
@@ -2328,7 +2328,7 @@ ref_indep_loop_p_1 (struct loop *loop, im_mem_ref *ref, bool stored_p)
LOOP. */
static bool
-ref_indep_loop_p (struct loop *loop, im_mem_ref *ref)
+ref_indep_loop_p (class loop *loop, im_mem_ref *ref)
{
gcc_checking_assert (MEM_ANALYZABLE (ref));
@@ -2338,7 +2338,7 @@ ref_indep_loop_p (struct loop *loop, im_mem_ref *ref)
/* Returns true if we can perform store motion of REF from LOOP. */
static bool
-can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
+can_sm_ref_p (class loop *loop, im_mem_ref *ref)
{
tree base;
@@ -2379,7 +2379,7 @@ can_sm_ref_p (struct loop *loop, im_mem_ref *ref)
motion was performed in one of the outer loops. */
static void
-find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
+find_refs_for_sm (class loop *loop, bitmap sm_executed, bitmap refs_to_sm)
{
bitmap refs = &memory_accesses.all_refs_stored_in_loop[loop->num];
unsigned i;
@@ -2399,7 +2399,7 @@ find_refs_for_sm (struct loop *loop, bitmap sm_executed, bitmap refs_to_sm)
on its exits). */
static bool
-loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
+loop_suitable_for_sm (class loop *loop ATTRIBUTE_UNUSED,
vec<edge> exits)
{
unsigned i;
@@ -2417,10 +2417,10 @@ loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED,
store motion was executed in one of the outer loops. */
static void
-store_motion_loop (struct loop *loop, bitmap sm_executed)
+store_motion_loop (class loop *loop, bitmap sm_executed)
{
vec<edge> exits = get_loop_exit_edges (loop);
- struct loop *subloop;
+ class loop *subloop;
bitmap sm_in_loop = BITMAP_ALLOC (&lim_bitmap_obstack);
if (loop_suitable_for_sm (loop, exits))
@@ -2443,7 +2443,7 @@ store_motion_loop (struct loop *loop, bitmap sm_executed)
static void
store_motion (void)
{
- struct loop *loop;
+ class loop *loop;
bitmap sm_executed = BITMAP_ALLOC (&lim_bitmap_obstack);
for (loop = current_loops->tree_root->inner; loop != NULL; loop = loop->next)
@@ -2459,12 +2459,12 @@ store_motion (void)
blocks that contain a nonpure call. */
static void
-fill_always_executed_in_1 (struct loop *loop, sbitmap contains_call)
+fill_always_executed_in_1 (class loop *loop, sbitmap contains_call)
{
basic_block bb = NULL, *bbs, last = NULL;
unsigned i;
edge e;
- struct loop *inn_loop = loop;
+ class loop *inn_loop = loop;
if (ALWAYS_EXECUTED_IN (loop->header) == NULL)
{
@@ -2537,7 +2537,7 @@ static void
fill_always_executed_in (void)
{
basic_block bb;
- struct loop *loop;
+ class loop *loop;
auto_sbitmap contains_call (last_basic_block_for_fn (cfun));
bitmap_clear (contains_call);
@@ -2564,7 +2564,7 @@ fill_always_executed_in (void)
static void
tree_ssa_lim_initialize (void)
{
- struct loop *loop;
+ class loop *loop;
unsigned i;
bitmap_obstack_initialize (&lim_bitmap_obstack);
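The ALWAYS_EXECUTED_IN / SET_ALWAYS_EXECUTED_IN macros above recover a loop pointer from a basic block's untyped aux slot, so the class-key in that cast is purely an elaborated-type-specifier. A minimal sketch of that aux-pointer pattern, with hypothetical names standing in for the real GCC types:

  // Pass-local data hangs off an untyped aux pointer...
  class loop_like { public: int num; };
  struct block_like { void *aux; };

  // ...and is recovered with a cast; 'class' and 'struct' are
  // interchangeable here because both merely name loop_like.
  #define BLOCK_LOOP(BB) ((class loop_like *) (BB)->aux)

  int main ()
  {
    loop_like l;
    l.num = 7;
    block_like bb;
    bb.aux = &l;
    return BLOCK_LOOP (&bb)->num == 7 ? 0 : 1;
  }

The type only has to be declared for the cast and complete for the member access; which key is written does not affect code generation.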
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index eb0c1c94b92..5952cad7bba 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -82,7 +82,7 @@ enum unroll_level
if they are not NULL. */
void
-create_canonical_iv (struct loop *loop, edge exit, tree niter,
+create_canonical_iv (class loop *loop, edge exit, tree niter,
tree *var_before = NULL, tree *var_after = NULL)
{
edge in;
@@ -161,7 +161,7 @@ struct loop_size
/* Return true if OP in STMT will be constant after peeling LOOP. */
static bool
-constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
+constant_after_peeling (tree op, gimple *stmt, class loop *loop)
{
if (is_gimple_min_invariant (op))
return true;
@@ -211,7 +211,7 @@ constant_after_peeling (tree op, gimple *stmt, struct loop *loop)
Stop estimating after UPPER_BOUND is met. Return true in this case. */
static bool
-tree_estimate_loop_size (struct loop *loop, edge exit, edge edge_to_cancel,
+tree_estimate_loop_size (class loop *loop, edge exit, edge edge_to_cancel,
struct loop_size *size, int upper_bound)
{
basic_block *body = get_loop_body (loop);
@@ -441,7 +441,7 @@ estimated_unrolled_size (struct loop_size *size,
The other cases are hopefully rare and will be cleaned up later. */
static edge
-loop_edge_to_cancel (struct loop *loop)
+loop_edge_to_cancel (class loop *loop)
{
vec<edge> exits;
unsigned i;
@@ -495,9 +495,9 @@ loop_edge_to_cancel (struct loop *loop)
known to not be executed. */
static bool
-remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
+remove_exits_and_undefined_stmts (class loop *loop, unsigned int npeeled)
{
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
bool changed = false;
for (elt = loop->bounds; elt; elt = elt->next)
@@ -553,9 +553,9 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
discovered. */
static bool
-remove_redundant_iv_tests (struct loop *loop)
+remove_redundant_iv_tests (class loop *loop)
{
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
bool changed = false;
if (!loop->any_upper_bound)
@@ -569,7 +569,7 @@ remove_redundant_iv_tests (struct loop *loop)
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
if (!loop_exit_edge_p (loop, exit_edge))
exit_edge = EDGE_SUCC (bb, 1);
@@ -629,7 +629,7 @@ unloop_loops (bitmap loop_closed_ssa_invalidated,
{
while (loops_to_unloop.length ())
{
- struct loop *loop = loops_to_unloop.pop ();
+ class loop *loop = loops_to_unloop.pop ();
int n_unroll = loops_to_unloop_nunroll.pop ();
basic_block latch = loop->latch;
edge latch_edge = loop_latch_edge (loop);
@@ -688,7 +688,7 @@ unloop_loops (bitmap loop_closed_ssa_invalidated,
a summary of the unroll to the dump file. */
static bool
-try_unroll_loop_completely (struct loop *loop,
+try_unroll_loop_completely (class loop *loop,
edge exit, tree niter, bool may_be_zero,
enum unroll_level ul,
HOST_WIDE_INT maxiter,
@@ -986,7 +986,7 @@ estimated_peeled_sequence_size (struct loop_size *size,
Parameters are the same as for try_unroll_loops_completely */
static bool
-try_peel_loop (struct loop *loop,
+try_peel_loop (class loop *loop,
edge exit, tree niter, bool may_be_zero,
HOST_WIDE_INT maxiter)
{
@@ -1155,7 +1155,7 @@ try_peel_loop (struct loop *loop,
Returns true if cfg is changed. */
static bool
-canonicalize_loop_induction_variables (struct loop *loop,
+canonicalize_loop_induction_variables (class loop *loop,
bool create_iv, enum unroll_level ul,
bool try_eval, bool allow_peel)
{
@@ -1164,7 +1164,7 @@ canonicalize_loop_induction_variables (struct loop *loop,
HOST_WIDE_INT maxiter;
bool modified = false;
dump_user_location_t locus;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
bool may_be_zero = false;
/* For unrolling allow conditional constant or zero iterations, thus
@@ -1282,7 +1282,7 @@ canonicalize_loop_induction_variables (struct loop *loop,
unsigned int
canonicalize_induction_variables (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
bool irred_invalidated = false;
bitmap loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
@@ -1324,11 +1324,11 @@ canonicalize_induction_variables (void)
static bool
tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
- bitmap father_bbs, struct loop *loop)
+ bitmap father_bbs, class loop *loop)
{
- struct loop *loop_father;
+ class loop *loop_father;
bool changed = false;
- struct loop *inner;
+ class loop *inner;
enum unroll_level ul;
unsigned num = number_of_loops (cfun);
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index cb189aba533..fd5e99b3886 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -121,7 +121,7 @@ along with GCC; see the file COPYING3. If not see
exists. */
static inline HOST_WIDE_INT
-avg_loop_niter (struct loop *loop)
+avg_loop_niter (class loop *loop)
{
HOST_WIDE_INT niter = estimated_stmt_executions_int (loop);
if (niter == -1)
@@ -398,7 +398,7 @@ struct iv_group
/* Number of IV candidates in the cost_map. */
unsigned n_map_members;
/* The costs wrto the iv candidates. */
- struct cost_pair *cost_map;
+ class cost_pair *cost_map;
/* The selected candidate for the group. */
struct iv_cand *selected;
/* Uses in the group. */
@@ -551,7 +551,7 @@ iv_inv_expr_hasher::equal (const iv_inv_expr_ent *expr1,
struct ivopts_data
{
/* The currently optimized loop. */
- struct loop *current_loop;
+ class loop *current_loop;
location_t loop_loc;
/* Numbers of iterations for all exits of the current loop. */
@@ -629,7 +629,7 @@ public:
unsigned bad_groups;
/* Candidate assigned to a use, together with the related costs. */
- struct cost_pair **cand_for_group;
+ class cost_pair **cand_for_group;
/* Number of times each candidate is used. */
unsigned *n_cand_uses;
@@ -668,10 +668,10 @@ struct iv_ca_delta
struct iv_group *group;
/* An old assignment (for rollback purposes). */
- struct cost_pair *old_cp;
+ class cost_pair *old_cp;
/* A new assignment. */
- struct cost_pair *new_cp;
+ class cost_pair *new_cp;
/* Next change in the list. */
struct iv_ca_delta *next;
@@ -704,7 +704,7 @@ static comp_cost force_expr_to_var_cost (tree, bool);
/* The single loop exit if it dominates the latch, NULL otherwise. */
edge
-single_dom_exit (struct loop *loop)
+single_dom_exit (class loop *loop)
{
edge exit = single_exit (loop);
@@ -885,7 +885,7 @@ name_info (struct ivopts_data *data, tree name)
emitted in LOOP. */
static bool
-stmt_after_ip_normal_pos (struct loop *loop, gimple *stmt)
+stmt_after_ip_normal_pos (class loop *loop, gimple *stmt)
{
basic_block bb = ip_normal_pos (loop), sbb = gimple_bb (stmt);
@@ -926,7 +926,7 @@ stmt_after_inc_pos (struct iv_cand *cand, gimple *stmt, bool true_if_equal)
CAND is incremented in LOOP. */
static bool
-stmt_after_increment (struct loop *loop, struct iv_cand *cand, gimple *stmt)
+stmt_after_increment (class loop *loop, struct iv_cand *cand, gimple *stmt)
{
switch (cand->pos)
{
@@ -976,10 +976,10 @@ contains_abnormal_ssa_name_p (tree expr)
/* Returns the structure describing number of iterations determined from
EXIT of DATA->current_loop, or NULL if something goes wrong. */
-static struct tree_niter_desc *
+static class tree_niter_desc *
niter_for_exit (struct ivopts_data *data, edge exit)
{
- struct tree_niter_desc *desc;
+ class tree_niter_desc *desc;
tree_niter_desc **slot;
if (!data->niters)
@@ -995,7 +995,7 @@ niter_for_exit (struct ivopts_data *data, edge exit)
/* Try to determine number of iterations. We cannot safely work with ssa
names that appear in phi nodes on abnormal edges, so that we do not
create overlapping life ranges for them (PR 27283). */
- desc = XNEW (struct tree_niter_desc);
+ desc = XNEW (class tree_niter_desc);
if (!number_of_iterations_exit (data->current_loop,
exit, desc, true)
|| contains_abnormal_ssa_name_p (desc->niter))
@@ -1015,7 +1015,7 @@ niter_for_exit (struct ivopts_data *data, edge exit)
single dominating exit of DATA->current_loop, or NULL if something
goes wrong. */
-static struct tree_niter_desc *
+static class tree_niter_desc *
niter_for_single_dom_exit (struct ivopts_data *data)
{
edge exit = single_dom_exit (data->current_loop);
@@ -1246,7 +1246,7 @@ find_bivs (struct ivopts_data *data)
affine_iv iv;
tree step, type, base, stop;
bool found = false;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
gphi_iterator psi;
for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
@@ -1304,7 +1304,7 @@ mark_bivs (struct ivopts_data *data)
gimple *def;
tree var;
struct iv *iv, *incr_iv;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
basic_block incr_bb;
gphi_iterator psi;
@@ -1351,7 +1351,7 @@ static bool
find_givs_in_stmt_scev (struct ivopts_data *data, gimple *stmt, affine_iv *iv)
{
tree lhs, stop;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
iv->base = NULL_TREE;
iv->step = NULL_TREE;
@@ -1415,7 +1415,7 @@ find_givs_in_bb (struct ivopts_data *data, basic_block bb)
static void
find_givs (struct ivopts_data *data)
{
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
basic_block *body = get_loop_body_in_dom_order (loop);
unsigned i;
@@ -1441,7 +1441,7 @@ find_induction_variables (struct ivopts_data *data)
if (dump_file && (dump_flags & TDF_DETAILS))
{
- struct tree_niter_desc *niter = niter_for_single_dom_exit (data);
+ class tree_niter_desc *niter = niter_for_single_dom_exit (data);
if (niter)
{
@@ -1741,8 +1741,8 @@ find_interesting_uses_cond (struct ivopts_data *data, gimple *stmt)
outside of the returned loop. Returns NULL if EXPR is not
even obviously invariant in LOOP. */
-struct loop *
-outermost_invariant_loop_for_expr (struct loop *loop, tree expr)
+class loop *
+outermost_invariant_loop_for_expr (class loop *loop, tree expr)
{
basic_block def_bb;
unsigned i, len;
@@ -1771,7 +1771,7 @@ outermost_invariant_loop_for_expr (struct loop *loop, tree expr)
len = TREE_OPERAND_LENGTH (expr);
for (i = 0; i < len; i++)
{
- struct loop *ivloop;
+ class loop *ivloop;
if (!TREE_OPERAND (expr, i))
continue;
@@ -1789,7 +1789,7 @@ outermost_invariant_loop_for_expr (struct loop *loop, tree expr)
should not be the function body. */
bool
-expr_invariant_in_loop_p (struct loop *loop, tree expr)
+expr_invariant_in_loop_p (class loop *loop, tree expr)
{
basic_block def_bb;
unsigned i, len;
@@ -1982,7 +1982,7 @@ idx_find_step (tree base, tree *idx, void *data)
struct iv *iv;
bool use_overflow_semantics = false;
tree step, iv_base, iv_step, lbound, off;
- struct loop *loop = dta->ivopts_data->current_loop;
+ class loop *loop = dta->ivopts_data->current_loop;
/* If base is a component ref, require that the offset of the reference
be invariant. */
@@ -3140,7 +3140,7 @@ add_candidate_1 (struct ivopts_data *data,
is already nonempty. */
static bool
-allow_ip_end_pos_p (struct loop *loop)
+allow_ip_end_pos_p (class loop *loop)
{
if (!ip_normal_pos (loop))
return true;
@@ -3331,8 +3331,8 @@ static void
record_common_cand (struct ivopts_data *data, tree base,
tree step, struct iv_use *use)
{
- struct iv_common_cand ent;
- struct iv_common_cand **slot;
+ class iv_common_cand ent;
+ class iv_common_cand **slot;
ent.base = base;
ent.step = step;
@@ -3361,10 +3361,10 @@ static int
common_cand_cmp (const void *p1, const void *p2)
{
unsigned n1, n2;
- const struct iv_common_cand *const *const ccand1
- = (const struct iv_common_cand *const *)p1;
- const struct iv_common_cand *const *const ccand2
- = (const struct iv_common_cand *const *)p2;
+ const class iv_common_cand *const *const ccand1
+ = (const class iv_common_cand *const *)p1;
+ const class iv_common_cand *const *const ccand2
+ = (const class iv_common_cand *const *)p2;
n1 = (*ccand1)->uses.length ();
n2 = (*ccand2)->uses.length ();
@@ -3382,7 +3382,7 @@ add_iv_candidate_derived_from_uses (struct ivopts_data *data)
data->iv_common_cands.qsort (common_cand_cmp);
for (i = 0; i < data->iv_common_cands.length (); i++)
{
- struct iv_common_cand *ptr = data->iv_common_cands[i];
+ class iv_common_cand *ptr = data->iv_common_cands[i];
/* Only add IV candidate if it's derived from multiple uses. */
if (ptr->uses.length () <= 1)
@@ -3558,7 +3558,7 @@ alloc_use_cost_map (struct ivopts_data *data)
}
group->n_map_members = size;
- group->cost_map = XCNEWVEC (struct cost_pair, size);
+ group->cost_map = XCNEWVEC (class cost_pair, size);
}
}
@@ -3614,12 +3614,12 @@ found:
/* Gets cost of (GROUP, CAND) pair. */
-static struct cost_pair *
+static class cost_pair *
get_group_iv_cost (struct ivopts_data *data, struct iv_group *group,
struct iv_cand *cand)
{
unsigned i, s;
- struct cost_pair *ret;
+ class cost_pair *ret;
if (!cand)
return NULL;
@@ -3753,7 +3753,7 @@ prepare_decl_rtl (tree *expr_p, int *ws, void *data)
static bool ATTRIBUTE_UNUSED
generic_predict_doloop_p (struct ivopts_data *data)
{
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
/* Call target hook for target dependent checks. */
if (!targetm.predict_doloop_p (loop))
@@ -3768,7 +3768,7 @@ generic_predict_doloop_p (struct ivopts_data *data)
suitable or not. Keep it as simple as possible, feel free to extend it
if you find any multiple exits cases matter. */
edge exit = single_dom_exit (loop);
- struct tree_niter_desc *niter_desc;
+ class tree_niter_desc *niter_desc;
if (!exit || !(niter_desc = niter_for_exit (data, exit)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -3832,7 +3832,7 @@ computation_cost (tree expr, bool speed)
/* Returns variable containing the value of candidate CAND at statement AT. */
static tree
-var_at_stmt (struct loop *loop, struct iv_cand *cand, gimple *stmt)
+var_at_stmt (class loop *loop, struct iv_cand *cand, gimple *stmt)
{
if (stmt_after_increment (loop, cand, stmt))
return cand->var_after;
@@ -3883,9 +3883,9 @@ determine_common_wider_type (tree *a, tree *b)
non-null. Returns false if USE cannot be expressed using CAND. */
static bool
-get_computation_aff_1 (struct loop *loop, gimple *at, struct iv_use *use,
- struct iv_cand *cand, struct aff_tree *aff_inv,
- struct aff_tree *aff_var, widest_int *prat = NULL)
+get_computation_aff_1 (class loop *loop, gimple *at, struct iv_use *use,
+ struct iv_cand *cand, class aff_tree *aff_inv,
+ class aff_tree *aff_var, widest_int *prat = NULL)
{
tree ubase = use->iv->base, ustep = use->iv->step;
tree cbase = cand->iv->base, cstep = cand->iv->step;
@@ -3989,8 +3989,8 @@ get_computation_aff_1 (struct loop *loop, gimple *at, struct iv_use *use,
form into AFF. Returns false if USE cannot be expressed using CAND. */
static bool
-get_computation_aff (struct loop *loop, gimple *at, struct iv_use *use,
- struct iv_cand *cand, struct aff_tree *aff)
+get_computation_aff (class loop *loop, gimple *at, struct iv_use *use,
+ struct iv_cand *cand, class aff_tree *aff)
{
aff_tree aff_var;
@@ -4027,7 +4027,7 @@ get_use_type (struct iv_use *use)
CAND at statement AT in LOOP. The computation is unshared. */
static tree
-get_computation_at (struct loop *loop, gimple *at,
+get_computation_at (class loop *loop, gimple *at,
struct iv_use *use, struct iv_cand *cand)
{
aff_tree aff;
@@ -4809,7 +4809,7 @@ determine_group_iv_cost_address (struct ivopts_data *data,
stores it to VAL. */
static void
-cand_value_at (struct loop *loop, struct iv_cand *cand, gimple *at, tree niter,
+cand_value_at (class loop *loop, struct iv_cand *cand, gimple *at, tree niter,
aff_tree *val)
{
aff_tree step, delta, nit;
@@ -4868,7 +4868,7 @@ iv_period (struct iv *iv)
static enum tree_code
iv_elimination_compare (struct ivopts_data *data, struct iv_use *use)
{
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
basic_block ex_bb;
edge exit;
@@ -4992,10 +4992,10 @@ difference_cannot_overflow_p (struct ivopts_data *data, tree base, tree offset)
static bool
iv_elimination_compare_lt (struct ivopts_data *data,
struct iv_cand *cand, enum tree_code *comp_p,
- struct tree_niter_desc *niter)
+ class tree_niter_desc *niter)
{
tree cand_type, a, b, mbz, nit_type = TREE_TYPE (niter->niter), offset;
- struct aff_tree nit, tmpa, tmpb;
+ class aff_tree nit, tmpa, tmpb;
enum tree_code comp;
HOST_WIDE_INT step;
@@ -5094,9 +5094,9 @@ may_eliminate_iv (struct ivopts_data *data,
basic_block ex_bb;
edge exit;
tree period;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
aff_tree bnd;
- struct tree_niter_desc *desc = NULL;
+ class tree_niter_desc *desc = NULL;
if (TREE_CODE (cand->iv->step) != INTEGER_CST)
return false;
@@ -5704,7 +5704,7 @@ determine_set_costs (struct ivopts_data *data)
gphi *phi;
gphi_iterator psi;
tree op;
- struct loop *loop = data->current_loop;
+ class loop *loop = data->current_loop;
bitmap_iterator bi;
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -5761,7 +5761,7 @@ determine_set_costs (struct ivopts_data *data)
/* Returns true if A is a cheaper cost pair than B. */
static bool
-cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
+cheaper_cost_pair (class cost_pair *a, class cost_pair *b)
{
if (!a)
return false;
@@ -5786,7 +5786,7 @@ cheaper_cost_pair (struct cost_pair *a, struct cost_pair *b)
for more expensive, equal and cheaper respectively. */
static int
-compare_cost_pair (struct cost_pair *a, struct cost_pair *b)
+compare_cost_pair (class cost_pair *a, class cost_pair *b)
{
if (cheaper_cost_pair (a, b))
return -1;
@@ -5798,8 +5798,8 @@ compare_cost_pair (struct cost_pair *a, struct cost_pair *b)
/* Returns candidate by that USE is expressed in IVS. */
-static struct cost_pair *
-iv_ca_cand_for_group (struct iv_ca *ivs, struct iv_group *group)
+static class cost_pair *
+iv_ca_cand_for_group (class iv_ca *ivs, struct iv_group *group)
{
return ivs->cand_for_group[group->id];
}
@@ -5807,7 +5807,7 @@ iv_ca_cand_for_group (struct iv_ca *ivs, struct iv_group *group)
/* Computes the cost field of IVS structure. */
static void
-iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
+iv_ca_recount_cost (struct ivopts_data *data, class iv_ca *ivs)
{
comp_cost cost = ivs->cand_use_cost;
@@ -5820,7 +5820,7 @@ iv_ca_recount_cost (struct ivopts_data *data, struct iv_ca *ivs)
and IVS. */
static void
-iv_ca_set_remove_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
+iv_ca_set_remove_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
{
bitmap_iterator bi;
unsigned iid;
@@ -5840,11 +5840,11 @@ iv_ca_set_remove_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
/* Set USE not to be expressed by any candidate in IVS. */
static void
-iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_set_no_cp (struct ivopts_data *data, class iv_ca *ivs,
struct iv_group *group)
{
unsigned gid = group->id, cid;
- struct cost_pair *cp;
+ class cost_pair *cp;
cp = ivs->cand_for_group[gid];
if (!cp)
@@ -5874,7 +5874,7 @@ iv_ca_set_no_cp (struct ivopts_data *data, struct iv_ca *ivs,
IVS. */
static void
-iv_ca_set_add_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
+iv_ca_set_add_invs (class iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
{
bitmap_iterator bi;
unsigned iid;
@@ -5894,8 +5894,8 @@ iv_ca_set_add_invs (struct iv_ca *ivs, bitmap invs, unsigned *n_inv_uses)
/* Set cost pair for GROUP in set IVS to CP. */
static void
-iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
- struct iv_group *group, struct cost_pair *cp)
+iv_ca_set_cp (struct ivopts_data *data, class iv_ca *ivs,
+ struct iv_group *group, class cost_pair *cp)
{
unsigned gid = group->id, cid;
@@ -5933,10 +5933,10 @@ iv_ca_set_cp (struct ivopts_data *data, struct iv_ca *ivs,
set IVS don't give any result. */
static void
-iv_ca_add_group (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_add_group (struct ivopts_data *data, class iv_ca *ivs,
struct iv_group *group)
{
- struct cost_pair *best_cp = NULL, *cp;
+ class cost_pair *best_cp = NULL, *cp;
bitmap_iterator bi;
unsigned i;
struct iv_cand *cand;
@@ -5970,7 +5970,7 @@ iv_ca_add_group (struct ivopts_data *data, struct iv_ca *ivs,
/* Get cost for assignment IVS. */
static comp_cost
-iv_ca_cost (struct iv_ca *ivs)
+iv_ca_cost (class iv_ca *ivs)
{
/* This was a conditional expression but it triggered a bug in
Sun C 5.5. */
@@ -5985,9 +5985,9 @@ iv_ca_cost (struct iv_ca *ivs)
respectively. */
static int
-iv_ca_compare_deps (struct ivopts_data *data, struct iv_ca *ivs,
- struct iv_group *group, struct cost_pair *old_cp,
- struct cost_pair *new_cp)
+iv_ca_compare_deps (struct ivopts_data *data, class iv_ca *ivs,
+ struct iv_group *group, class cost_pair *old_cp,
+ class cost_pair *new_cp)
{
gcc_assert (old_cp && new_cp && old_cp != new_cp);
unsigned old_n_invs = ivs->n_invs;
@@ -6002,8 +6002,8 @@ iv_ca_compare_deps (struct ivopts_data *data, struct iv_ca *ivs,
it before NEXT. */
static struct iv_ca_delta *
-iv_ca_delta_add (struct iv_group *group, struct cost_pair *old_cp,
- struct cost_pair *new_cp, struct iv_ca_delta *next)
+iv_ca_delta_add (struct iv_group *group, class cost_pair *old_cp,
+ class cost_pair *new_cp, struct iv_ca_delta *next)
{
struct iv_ca_delta *change = XNEW (struct iv_ca_delta);
@@ -6059,10 +6059,10 @@ iv_ca_delta_reverse (struct iv_ca_delta *delta)
reverted instead. */
static void
-iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_delta_commit (struct ivopts_data *data, class iv_ca *ivs,
struct iv_ca_delta *delta, bool forward)
{
- struct cost_pair *from, *to;
+ class cost_pair *from, *to;
struct iv_ca_delta *act;
if (!forward)
@@ -6083,7 +6083,7 @@ iv_ca_delta_commit (struct ivopts_data *data, struct iv_ca *ivs,
/* Returns true if CAND is used in IVS. */
static bool
-iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
+iv_ca_cand_used_p (class iv_ca *ivs, struct iv_cand *cand)
{
return ivs->n_cand_uses[cand->id] > 0;
}
@@ -6091,7 +6091,7 @@ iv_ca_cand_used_p (struct iv_ca *ivs, struct iv_cand *cand)
/* Returns number of induction variable candidates in the set IVS. */
static unsigned
-iv_ca_n_cands (struct iv_ca *ivs)
+iv_ca_n_cands (class iv_ca *ivs)
{
return ivs->n_cands;
}
@@ -6114,14 +6114,14 @@ iv_ca_delta_free (struct iv_ca_delta **delta)
/* Allocates new iv candidates assignment. */
-static struct iv_ca *
+static class iv_ca *
iv_ca_new (struct ivopts_data *data)
{
- struct iv_ca *nw = XNEW (struct iv_ca);
+ class iv_ca *nw = XNEW (class iv_ca);
nw->upto = 0;
nw->bad_groups = 0;
- nw->cand_for_group = XCNEWVEC (struct cost_pair *,
+ nw->cand_for_group = XCNEWVEC (class cost_pair *,
data->vgroups.length ());
nw->n_cand_uses = XCNEWVEC (unsigned, data->vcands.length ());
nw->cands = BITMAP_ALLOC (NULL);
@@ -6139,7 +6139,7 @@ iv_ca_new (struct ivopts_data *data)
/* Free memory occupied by the set IVS. */
static void
-iv_ca_free (struct iv_ca **ivs)
+iv_ca_free (class iv_ca **ivs)
{
free ((*ivs)->cand_for_group);
free ((*ivs)->n_cand_uses);
@@ -6153,7 +6153,7 @@ iv_ca_free (struct iv_ca **ivs)
/* Dumps IVS to FILE. */
static void
-iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
+iv_ca_dump (struct ivopts_data *data, FILE *file, class iv_ca *ivs)
{
unsigned i;
comp_cost cost = iv_ca_cost (ivs);
@@ -6168,7 +6168,7 @@ iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
for (i = 0; i < ivs->upto; i++)
{
struct iv_group *group = data->vgroups[i];
- struct cost_pair *cp = iv_ca_cand_for_group (ivs, group);
+ class cost_pair *cp = iv_ca_cand_for_group (ivs, group);
if (cp)
fprintf (file, " group:%d --> iv_cand:%d, cost=("
"%" PRId64 ",%d)\n", group->id, cp->cand->id,
@@ -6204,14 +6204,14 @@ iv_ca_dump (struct ivopts_data *data, FILE *file, struct iv_ca *ivs)
the function will try to find a solution with minimal iv candidates. */
static comp_cost
-iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_extend (struct ivopts_data *data, class iv_ca *ivs,
struct iv_cand *cand, struct iv_ca_delta **delta,
unsigned *n_ivs, bool min_ncand)
{
unsigned i;
comp_cost cost;
struct iv_group *group;
- struct cost_pair *old_cp, *new_cp;
+ class cost_pair *old_cp, *new_cp;
*delta = NULL;
for (i = 0; i < ivs->upto; i++)
@@ -6257,13 +6257,13 @@ iv_ca_extend (struct ivopts_data *data, struct iv_ca *ivs,
the candidate with which we start narrowing. */
static comp_cost
-iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_narrow (struct ivopts_data *data, class iv_ca *ivs,
struct iv_cand *cand, struct iv_cand *start,
struct iv_ca_delta **delta)
{
unsigned i, ci;
struct iv_group *group;
- struct cost_pair *old_cp, *new_cp, *cp;
+ class cost_pair *old_cp, *new_cp, *cp;
bitmap_iterator bi;
struct iv_cand *cnd;
comp_cost cost, best_cost, acost;
@@ -6351,7 +6351,7 @@ iv_ca_narrow (struct ivopts_data *data, struct iv_ca *ivs,
differences in DELTA. */
static comp_cost
-iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_prune (struct ivopts_data *data, class iv_ca *ivs,
struct iv_cand *except_cand, struct iv_ca_delta **delta)
{
bitmap_iterator bi;
@@ -6400,13 +6400,13 @@ iv_ca_prune (struct ivopts_data *data, struct iv_ca *ivs,
cheaper local cost for GROUP than BEST_CP. Return pointer to
the corresponding cost_pair, otherwise just return BEST_CP. */
-static struct cost_pair*
+static class cost_pair*
cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
unsigned int cand_idx, struct iv_cand *old_cand,
- struct cost_pair *best_cp)
+ class cost_pair *best_cp)
{
struct iv_cand *cand;
- struct cost_pair *cp;
+ class cost_pair *cp;
gcc_assert (old_cand != NULL && best_cp != NULL);
if (cand_idx == old_cand->id)
@@ -6428,7 +6428,7 @@ cheaper_cost_with_cand (struct ivopts_data *data, struct iv_group *group,
candidate replacement in list DELTA. */
static comp_cost
-iv_ca_replace (struct ivopts_data *data, struct iv_ca *ivs,
+iv_ca_replace (struct ivopts_data *data, class iv_ca *ivs,
struct iv_ca_delta **delta)
{
bitmap_iterator bi, bj;
@@ -6436,7 +6436,7 @@ iv_ca_replace (struct ivopts_data *data, struct iv_ca *ivs,
struct iv_cand *cand;
comp_cost orig_cost, acost;
struct iv_ca_delta *act_delta, *tmp_delta;
- struct cost_pair *old_cp, *best_cp = NULL;
+ class cost_pair *old_cp, *best_cp = NULL;
*delta = NULL;
orig_cost = iv_ca_cost (ivs);
@@ -6503,7 +6503,7 @@ iv_ca_replace (struct ivopts_data *data, struct iv_ca *ivs,
based on any memory object. */
static bool
-try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
+try_add_cand_for (struct ivopts_data *data, class iv_ca *ivs,
struct iv_group *group, bool originalp)
{
comp_cost best_cost, act_cost;
@@ -6511,7 +6511,7 @@ try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
bitmap_iterator bi;
struct iv_cand *cand;
struct iv_ca_delta *best_delta = NULL, *act_delta;
- struct cost_pair *cp;
+ class cost_pair *cp;
iv_ca_add_group (data, ivs, group);
best_cost = iv_ca_cost (ivs);
@@ -6615,11 +6615,11 @@ try_add_cand_for (struct ivopts_data *data, struct iv_ca *ivs,
/* Finds an initial assignment of candidates to uses. */
-static struct iv_ca *
+static class iv_ca *
get_initial_solution (struct ivopts_data *data, bool originalp)
{
unsigned i;
- struct iv_ca *ivs = iv_ca_new (data);
+ class iv_ca *ivs = iv_ca_new (data);
for (i = 0; i < data->vgroups.length (); i++)
if (!try_add_cand_for (data, ivs, data->vgroups[i], originalp))
@@ -6637,7 +6637,7 @@ get_initial_solution (struct ivopts_data *data, bool originalp)
static bool
try_improve_iv_set (struct ivopts_data *data,
- struct iv_ca *ivs, bool *try_replace_p)
+ class iv_ca *ivs, bool *try_replace_p)
{
unsigned i, n_ivs;
comp_cost acost, best_cost = iv_ca_cost (ivs);
@@ -6708,10 +6708,10 @@ try_improve_iv_set (struct ivopts_data *data,
greedy heuristic -- we try to replace at most one candidate in the selected
solution and remove the unused ivs while this improves the cost. */
-static struct iv_ca *
+static class iv_ca *
find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
{
- struct iv_ca *set;
+ class iv_ca *set;
bool try_replace_p = true;
/* Get the initial solution. */
@@ -6749,12 +6749,12 @@ find_optimal_iv_set_1 (struct ivopts_data *data, bool originalp)
return set;
}
-static struct iv_ca *
+static class iv_ca *
find_optimal_iv_set (struct ivopts_data *data)
{
unsigned i;
comp_cost cost, origcost;
- struct iv_ca *set, *origset;
+ class iv_ca *set, *origset;
/* Determine the cost based on a strategy that starts with original IVs,
and try again using a strategy that prefers candidates not based
@@ -6850,7 +6850,7 @@ create_new_iv (struct ivopts_data *data, struct iv_cand *cand)
/* Creates new induction variables described in SET. */
static void
-create_new_ivs (struct ivopts_data *data, struct iv_ca *set)
+create_new_ivs (struct ivopts_data *data, class iv_ca *set)
{
unsigned i;
struct iv_cand *cand;
@@ -7204,7 +7204,7 @@ rewrite_use_compare (struct ivopts_data *data,
gimple_stmt_iterator bsi = gsi_for_stmt (use->stmt);
enum tree_code compare;
struct iv_group *group = data->vgroups[use->group_id];
- struct cost_pair *cp = get_group_iv_cost (data, group, cand);
+ class cost_pair *cp = get_group_iv_cost (data, group, cand);
bound = cp->value;
if (bound)
@@ -7416,7 +7416,7 @@ remove_unused_ivs (struct ivopts_data *data, bitmap toremove)
}
}
-/* Frees memory occupied by struct tree_niter_desc in *VALUE. Callback
+/* Frees memory occupied by class tree_niter_desc in *VALUE. Callback
for hash_map::traverse. */
bool
@@ -7599,11 +7599,11 @@ determine_scaling_factor (struct ivopts_data *data, basic_block *body)
/* Optimizes the LOOP. Returns true if anything changed. */
static bool
-tree_ssa_iv_optimize_loop (struct ivopts_data *data, struct loop *loop,
+tree_ssa_iv_optimize_loop (struct ivopts_data *data, class loop *loop,
bitmap toremove)
{
bool changed = false;
- struct iv_ca *iv_ca;
+ class iv_ca *iv_ca;
edge exit = single_dom_exit (loop);
basic_block *body;
@@ -7689,7 +7689,7 @@ finish:
void
tree_ssa_iv_optimize (void)
{
- struct loop *loop;
+ class loop *loop;
struct ivopts_data data;
auto_bitmap toremove;
diff --git a/gcc/tree-ssa-loop-ivopts.h b/gcc/tree-ssa-loop-ivopts.h
index 6f21e63a622..1ad4a77c356 100644
--- a/gcc/tree-ssa-loop-ivopts.h
+++ b/gcc/tree-ssa-loop-ivopts.h
@@ -20,18 +20,18 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_SSA_LOOP_IVOPTS_H
#define GCC_TREE_SSA_LOOP_IVOPTS_H
-extern edge single_dom_exit (struct loop *);
+extern edge single_dom_exit (class loop *);
extern void dump_iv (FILE *, struct iv *);
extern void dump_use (FILE *, struct iv_use *);
extern void dump_uses (FILE *, struct ivopts_data *);
extern void dump_cand (FILE *, struct iv_cand *);
extern bool contains_abnormal_ssa_name_p (tree);
-extern struct loop *outermost_invariant_loop_for_expr (struct loop *, tree);
-extern bool expr_invariant_in_loop_p (struct loop *, tree);
+extern class loop *outermost_invariant_loop_for_expr (class loop *, tree);
+extern bool expr_invariant_in_loop_p (class loop *, tree);
extern tree strip_offset (tree, poly_uint64_pod *);
bool may_be_nonaddressable_p (tree expr);
void tree_ssa_iv_optimize (void);
-void create_canonical_iv (struct loop *, edge, tree,
+void create_canonical_iv (class loop *, edge, tree,
tree * = NULL, tree * = NULL);
#endif /* GCC_TREE_SSA_LOOP_IVOPTS_H */
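[Editorial aside, not part of the patch: the hunks above redeclare iv_ca, cost_pair, tree_niter_desc and loop with the same class-key that their definitions use. A minimal, self-contained sketch of the kind of class-key mismatch such changes silence follows; the type name is made up, and the flag shown is the one Clang and newer GCC releases use for this diagnostic.]

// demo.cc -- illustrative only; 'widget' is a stand-in type, not from GCC.
// A compiler that diagnoses mismatched class-keys, e.g.
//   clang++ -Wmismatched-tags -c demo.cc
// flags the mismatch between the two declarations of 'widget' below.

struct widget;                  // forward declaration with the 'struct' key

class widget                    // definition uses the 'class' key: mismatch
{
public:
  int id;
};

class widget *                  // consistent redeclaration: no warning
lookup (class widget *w)
{
  return w;
}

int
main ()
{
  widget w { 42 };
  return lookup (&w)->id == 42 ? 0 : 1;
}

[End of aside; the patch resumes below.]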
diff --git a/gcc/tree-ssa-loop-manip.c b/gcc/tree-ssa-loop-manip.c
index ecbe212369e..6a1bbaae573 100644
--- a/gcc/tree-ssa-loop-manip.c
+++ b/gcc/tree-ssa-loop-manip.c
@@ -58,7 +58,7 @@ static bitmap_obstack loop_renamer_obstack;
VAR_AFTER (unless they are NULL). */
void
-create_iv (tree base, tree step, tree var, struct loop *loop,
+create_iv (tree base, tree step, tree var, class loop *loop,
gimple_stmt_iterator *incr_pos, bool after,
tree *var_before, tree *var_after)
{
@@ -156,8 +156,8 @@ create_iv (tree base, tree step, tree var, struct loop *loop,
/* Return the innermost superloop LOOP of USE_LOOP that is a superloop of
both DEF_LOOP and USE_LOOP. */
-static inline struct loop *
-find_sibling_superloop (struct loop *use_loop, struct loop *def_loop)
+static inline class loop *
+find_sibling_superloop (class loop *use_loop, class loop *def_loop)
{
unsigned ud = loop_depth (use_loop);
unsigned dd = loop_depth (def_loop);
@@ -196,7 +196,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
{
unsigned i;
bitmap_iterator bi;
- struct loop *def_loop = def_bb->loop_father;
+ class loop *def_loop = def_bb->loop_father;
unsigned def_loop_depth = loop_depth (def_loop);
bitmap def_loop_exits;
@@ -208,7 +208,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
EXECUTE_IF_SET_IN_BITMAP (use_blocks, 0, i, bi)
{
basic_block use_bb = BASIC_BLOCK_FOR_FN (cfun, i);
- struct loop *use_loop = use_bb->loop_father;
+ class loop *use_loop = use_bb->loop_father;
gcc_checking_assert (def_loop != use_loop
&& ! flow_loop_nested_p (def_loop, use_loop));
if (! flow_loop_nested_p (use_loop, def_loop))
@@ -234,7 +234,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
FOR_EACH_EDGE (e, ei, bb->preds)
{
basic_block pred = e->src;
- struct loop *pred_loop = pred->loop_father;
+ class loop *pred_loop = pred->loop_father;
unsigned pred_loop_depth = loop_depth (pred_loop);
bool pred_visited;
@@ -268,7 +268,7 @@ compute_live_loop_exits (bitmap live_exits, bitmap use_blocks,
}
def_loop_exits = BITMAP_ALLOC (&loop_renamer_obstack);
- for (struct loop *loop = def_loop;
+ for (class loop *loop = def_loop;
loop != current_loops->tree_root;
loop = loop_outer (loop))
bitmap_ior_into (def_loop_exits, loop_exits[loop->num]);
@@ -293,7 +293,7 @@ add_exit_phi (basic_block exit, tree var)
basic_block def_bb = gimple_bb (def_stmt);
FOR_EACH_EDGE (e, ei, exit->preds)
{
- struct loop *aloop = find_common_loop (def_bb->loop_father,
+ class loop *aloop = find_common_loop (def_bb->loop_father,
e->src->loop_father);
if (!flow_bb_inside_loop_p (aloop, e->dest))
break;
@@ -357,7 +357,7 @@ add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap *loop_exits)
static void
get_loops_exits (bitmap *loop_exits)
{
- struct loop *loop;
+ class loop *loop;
unsigned j;
edge e;
@@ -383,7 +383,7 @@ find_uses_to_rename_use (basic_block bb, tree use, bitmap *use_blocks,
{
unsigned ver;
basic_block def_bb;
- struct loop *def_loop;
+ class loop *def_loop;
if (TREE_CODE (use) != SSA_NAME)
return;
@@ -532,7 +532,7 @@ find_uses_to_rename_def (tree def, bitmap *use_blocks, bitmap need_phis)
USE_BLOCKS. Record the SSA names that will need exit PHIs in NEED_PHIS. */
static void
-find_uses_to_rename_in_loop (struct loop *loop, bitmap *use_blocks,
+find_uses_to_rename_in_loop (class loop *loop, bitmap *use_blocks,
bitmap need_phis, int use_flags)
{
bool do_virtuals = (use_flags & SSA_OP_VIRTUAL_USES) != 0;
@@ -624,7 +624,7 @@ find_uses_to_rename_in_loop (struct loop *loop, bitmap *use_blocks,
void
rewrite_into_loop_closed_ssa_1 (bitmap changed_bbs, unsigned update_flag,
- int use_flags, struct loop *loop)
+ int use_flags, class loop *loop)
{
bitmap *use_blocks;
bitmap names_to_rename;
@@ -698,7 +698,7 @@ rewrite_into_loop_closed_ssa (bitmap changed_bbs, unsigned update_flag)
form. */
void
-rewrite_virtuals_into_loop_closed_ssa (struct loop *loop)
+rewrite_virtuals_into_loop_closed_ssa (class loop *loop)
{
rewrite_into_loop_closed_ssa_1 (NULL, 0, SSA_OP_VIRTUAL_USES, loop);
}
@@ -754,7 +754,7 @@ check_loop_closed_ssa_bb (basic_block bb)
if LOOP is NULL, otherwise, only LOOP is checked. */
DEBUG_FUNCTION void
-verify_loop_closed_ssa (bool verify_ssa_p, struct loop *loop)
+verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop)
{
if (number_of_loops (cfun) <= 1)
return;
@@ -830,7 +830,7 @@ split_loop_exit_edge (edge exit, bool copy_constants_p)
variables incremented at the end of the LOOP. */
basic_block
-ip_end_pos (struct loop *loop)
+ip_end_pos (class loop *loop)
{
return loop->latch;
}
@@ -839,7 +839,7 @@ ip_end_pos (struct loop *loop)
variables incremented just before exit condition of a LOOP. */
basic_block
-ip_normal_pos (struct loop *loop)
+ip_normal_pos (class loop *loop)
{
gimple *last;
basic_block bb;
@@ -870,7 +870,7 @@ ip_normal_pos (struct loop *loop)
the increment should be inserted after *BSI. */
void
-standard_iv_increment_position (struct loop *loop, gimple_stmt_iterator *bsi,
+standard_iv_increment_position (class loop *loop, gimple_stmt_iterator *bsi,
bool *insert_after)
{
basic_block bb = ip_normal_pos (loop), latch = ip_end_pos (loop);
@@ -918,7 +918,7 @@ copy_phi_node_args (unsigned first_new_block)
after the loop has been duplicated. */
bool
-gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
+gimple_duplicate_loop_to_header_edge (class loop *loop, edge e,
unsigned int ndupl, sbitmap wont_exit,
edge orig, vec<edge> *to_remove,
int flags)
@@ -950,8 +950,8 @@ gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
of iterations of the loop is returned in NITER. */
bool
-can_unroll_loop_p (struct loop *loop, unsigned factor,
- struct tree_niter_desc *niter)
+can_unroll_loop_p (class loop *loop, unsigned factor,
+ class tree_niter_desc *niter)
{
edge exit;
@@ -997,7 +997,7 @@ can_unroll_loop_p (struct loop *loop, unsigned factor,
how the exit from the unrolled loop should be controlled. */
static void
-determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
+determine_exit_conditions (class loop *loop, class tree_niter_desc *desc,
unsigned factor, tree *enter_cond,
tree *exit_base, tree *exit_step,
enum tree_code *exit_cmp, tree *exit_bound)
@@ -1106,7 +1106,7 @@ determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
dominated by BB by NUM/DEN. */
static void
-scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
+scale_dominated_blocks_in_loop (class loop *loop, basic_block bb,
profile_count num, profile_count den)
{
basic_block son;
@@ -1128,7 +1128,7 @@ scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb,
/* Return estimated niter for LOOP after unrolling by FACTOR times. */
gcov_type
-niter_for_unrolled_loop (struct loop *loop, unsigned factor)
+niter_for_unrolled_loop (class loop *loop, unsigned factor)
{
gcc_assert (factor != 0);
bool profile_p = false;
@@ -1225,8 +1225,8 @@ niter_for_unrolled_loop (struct loop *loop, unsigned factor)
#define PROB_UNROLLED_LOOP_ENTERED 90
void
-tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
- edge exit, struct tree_niter_desc *desc,
+tree_transform_and_unroll_loop (class loop *loop, unsigned factor,
+ edge exit, class tree_niter_desc *desc,
transform_callback transform,
void *data)
{
@@ -1237,7 +1237,7 @@ tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
gphi *phi_old_loop, *phi_new_loop, *phi_rest;
gphi_iterator psi_old_loop, psi_new_loop;
tree init, next, new_init;
- struct loop *new_loop;
+ class loop *new_loop;
basic_block rest, exit_bb;
edge old_entry, new_entry, old_latch, precond_edge, new_exit;
edge new_nonexit, e;
@@ -1435,8 +1435,8 @@ tree_transform_and_unroll_loop (struct loop *loop, unsigned factor,
of the arguments is the same as for tree_transform_and_unroll_loop. */
void
-tree_unroll_loop (struct loop *loop, unsigned factor,
- edge exit, struct tree_niter_desc *desc)
+tree_unroll_loop (class loop *loop, unsigned factor,
+ edge exit, class tree_niter_desc *desc)
{
tree_transform_and_unroll_loop (loop, factor, exit, desc,
NULL, NULL);
@@ -1518,7 +1518,7 @@ rewrite_all_phi_nodes_with_iv (loop_p loop, tree main_iv)
created. */
tree
-canonicalize_loop_ivs (struct loop *loop, tree *nit, bool bump_in_latch)
+canonicalize_loop_ivs (class loop *loop, tree *nit, bool bump_in_latch)
{
unsigned precision = TYPE_PRECISION (TREE_TYPE (*nit));
unsigned original_precision = precision;
diff --git a/gcc/tree-ssa-loop-manip.h b/gcc/tree-ssa-loop-manip.h
index 00206597e4d..8263a679288 100644
--- a/gcc/tree-ssa-loop-manip.h
+++ b/gcc/tree-ssa-loop-manip.h
@@ -20,41 +20,41 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_SSA_LOOP_MANIP_H
#define GCC_TREE_SSA_LOOP_MANIP_H
-typedef void (*transform_callback)(struct loop *, void *);
+typedef void (*transform_callback)(class loop *, void *);
-extern void create_iv (tree, tree, tree, struct loop *, gimple_stmt_iterator *,
+extern void create_iv (tree, tree, tree, class loop *, gimple_stmt_iterator *,
bool, tree *, tree *);
extern void rewrite_into_loop_closed_ssa_1 (bitmap, unsigned, int,
- struct loop *);
+ class loop *);
extern void rewrite_into_loop_closed_ssa (bitmap, unsigned);
-extern void rewrite_virtuals_into_loop_closed_ssa (struct loop *);
-extern void verify_loop_closed_ssa (bool, struct loop * = NULL);
+extern void rewrite_virtuals_into_loop_closed_ssa (class loop *);
+extern void verify_loop_closed_ssa (bool, class loop * = NULL);
static inline void
-checking_verify_loop_closed_ssa (bool verify_ssa_p, struct loop *loop = NULL)
+checking_verify_loop_closed_ssa (bool verify_ssa_p, class loop *loop = NULL)
{
if (flag_checking)
verify_loop_closed_ssa (verify_ssa_p, loop);
}
extern basic_block split_loop_exit_edge (edge, bool = false);
-extern basic_block ip_end_pos (struct loop *);
-extern basic_block ip_normal_pos (struct loop *);
-extern void standard_iv_increment_position (struct loop *,
+extern basic_block ip_end_pos (class loop *);
+extern basic_block ip_normal_pos (class loop *);
+extern void standard_iv_increment_position (class loop *,
gimple_stmt_iterator *, bool *);
-extern bool gimple_duplicate_loop_to_header_edge (struct loop *, edge,
+extern bool gimple_duplicate_loop_to_header_edge (class loop *, edge,
unsigned int, sbitmap,
edge, vec<edge> *,
int);
-extern bool can_unroll_loop_p (struct loop *loop, unsigned factor,
- struct tree_niter_desc *niter);
-extern gcov_type niter_for_unrolled_loop (struct loop *, unsigned);
-extern void tree_transform_and_unroll_loop (struct loop *, unsigned,
- edge, struct tree_niter_desc *,
+extern bool can_unroll_loop_p (class loop *loop, unsigned factor,
+ class tree_niter_desc *niter);
+extern gcov_type niter_for_unrolled_loop (class loop *, unsigned);
+extern void tree_transform_and_unroll_loop (class loop *, unsigned,
+ edge, class tree_niter_desc *,
transform_callback, void *);
-extern void tree_unroll_loop (struct loop *, unsigned,
- edge, struct tree_niter_desc *);
-extern tree canonicalize_loop_ivs (struct loop *, tree *, bool);
+extern void tree_unroll_loop (class loop *, unsigned,
+ edge, class tree_niter_desc *);
+extern tree canonicalize_loop_ivs (class loop *, tree *, bool);
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 5e75a412d93..cd2ced36971 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -65,7 +65,7 @@ struct bounds
static bool number_of_iterations_popcount (loop_p loop, edge exit,
enum tree_code code,
- struct tree_niter_desc *niter);
+ class tree_niter_desc *niter);
/* Splits expression EXPR to a variable part VAR and constant OFFSET. */
@@ -346,7 +346,7 @@ end:
in TYPE to MIN and MAX. */
static void
-determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
+determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
mpz_t min, mpz_t max)
{
int cnt = 0;
@@ -704,7 +704,7 @@ end:
comparisons before the loop (usually created by loop header copying). */
static void
-bound_difference (struct loop *loop, tree x, tree y, bounds *bnds)
+bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
{
tree type = TREE_TYPE (x);
tree varx, vary;
@@ -964,8 +964,8 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
bounds on the difference FINAL - IV->base. */
static bool
-number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
- tree final, struct tree_niter_desc *niter,
+number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
+ tree final, class tree_niter_desc *niter,
bool exit_must_be_taken, bounds *bnds)
{
tree niter_type = unsigned_type_for (type);
@@ -1149,7 +1149,7 @@ number_of_iterations_ne (struct loop *loop, tree type, affine_iv *iv,
static bool
number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
- struct tree_niter_desc *niter,
+ class tree_niter_desc *niter,
tree *delta, tree step,
bool exit_must_be_taken, bounds *bnds)
{
@@ -1268,7 +1268,7 @@ end:
static bool
assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
- struct tree_niter_desc *niter, tree step)
+ class tree_niter_desc *niter, tree step)
{
tree bound, d, assumption, diff;
tree niter_type = TREE_TYPE (step);
@@ -1337,7 +1337,7 @@ assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
static void
assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
- struct tree_niter_desc *niter, bounds *bnds)
+ class tree_niter_desc *niter, bounds *bnds)
{
tree assumption = boolean_true_node, bound, diff;
tree mbz, mbzl, mbzr, type1;
@@ -1463,8 +1463,8 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
that the exit must be taken eventually. */
static bool
-number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
- affine_iv *iv1, struct tree_niter_desc *niter,
+number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
+ affine_iv *iv1, class tree_niter_desc *niter,
bool exit_must_be_taken, bounds *bnds)
{
tree niter_type = unsigned_type_for (type);
@@ -1576,8 +1576,8 @@ number_of_iterations_lt (struct loop *loop, tree type, affine_iv *iv0,
is the case). BNDS bounds the difference IV1->base - IV0->base. */
static bool
-number_of_iterations_le (struct loop *loop, tree type, affine_iv *iv0,
- affine_iv *iv1, struct tree_niter_desc *niter,
+number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
+ affine_iv *iv1, class tree_niter_desc *niter,
bool exit_must_be_taken, bounds *bnds)
{
tree assumption;
@@ -1721,14 +1721,14 @@ adjust_cond_for_loop_until_wrap (tree type, affine_iv *iv0, tree_code *code,
if EVERY_ITERATION is true, we know the test is executed on every iteration.
The results (number of iterations and assumptions as described in
- comments at struct tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
+ comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
Returns false if it fails to determine number of iterations, true if it
was determined (possibly with some assumptions). */
static bool
-number_of_iterations_cond (struct loop *loop,
+number_of_iterations_cond (class loop *loop,
tree type, affine_iv *iv0, enum tree_code code,
- affine_iv *iv1, struct tree_niter_desc *niter,
+ affine_iv *iv1, class tree_niter_desc *niter,
bool only_exit, bool every_iteration)
{
bool exit_must_be_taken = false, ret;
@@ -2263,7 +2263,7 @@ tree_simplify_using_condition (tree cond, tree expr)
simplification was possible). */
tree
-simplify_using_initial_conditions (struct loop *loop, tree expr)
+simplify_using_initial_conditions (class loop *loop, tree expr)
{
edge e;
basic_block bb;
@@ -2315,7 +2315,7 @@ simplify_using_initial_conditions (struct loop *loop, tree expr)
(or EXPR unchanged, if no simplification was possible). */
static tree
-simplify_using_outer_evolutions (struct loop *loop, tree expr)
+simplify_using_outer_evolutions (class loop *loop, tree expr)
{
enum tree_code code = TREE_CODE (expr);
bool changed;
@@ -2368,7 +2368,7 @@ simplify_using_outer_evolutions (struct loop *loop, tree expr)
/* Returns true if EXIT is the only possible exit from LOOP. */
bool
-loop_only_exit_p (const struct loop *loop, const_edge exit)
+loop_only_exit_p (const class loop *loop, const_edge exit)
{
basic_block *body;
gimple_stmt_iterator bsi;
@@ -2395,15 +2395,15 @@ loop_only_exit_p (const struct loop *loop, const_edge exit)
/* Stores description of number of iterations of LOOP derived from
EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
information could be derived (and fields of NITER have meaning described
- in comments at struct tree_niter_desc declaration), false otherwise.
+ in comments at class tree_niter_desc declaration), false otherwise.
When EVERY_ITERATION is true, only tests that are known to be executed
every iteration are considered (i.e. only test that alone bounds the loop).
If AT_STMT is not NULL, this function stores LOOP's condition statement in
it when returning true. */
bool
-number_of_iterations_exit_assumptions (struct loop *loop, edge exit,
- struct tree_niter_desc *niter,
+number_of_iterations_exit_assumptions (class loop *loop, edge exit,
+ class tree_niter_desc *niter,
gcond **at_stmt, bool every_iteration)
{
gimple *last;
@@ -2598,7 +2598,7 @@ ssa_defined_by_minus_one_stmt_p (tree op, tree val)
static bool
number_of_iterations_popcount (loop_p loop, edge exit,
enum tree_code code,
- struct tree_niter_desc *niter)
+ class tree_niter_desc *niter)
{
bool adjust = true;
tree iter;
@@ -2720,8 +2720,8 @@ number_of_iterations_popcount (loop_p loop, edge exit,
the niter information holds unconditionally. */
bool
-number_of_iterations_exit (struct loop *loop, edge exit,
- struct tree_niter_desc *niter,
+number_of_iterations_exit (class loop *loop, edge exit,
+ class tree_niter_desc *niter,
bool warn, bool every_iteration)
{
gcond *stmt;
@@ -2746,13 +2746,13 @@ number_of_iterations_exit (struct loop *loop, edge exit,
chrec_dont_know is returned. */
tree
-find_loop_niter (struct loop *loop, edge *exit)
+find_loop_niter (class loop *loop, edge *exit)
{
unsigned i;
vec<edge> exits = get_loop_exit_edges (loop);
edge ex;
tree niter = NULL_TREE, aniter;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
*exit = NULL;
FOR_EACH_VEC_ELT (exits, i, ex)
@@ -2808,7 +2808,7 @@ find_loop_niter (struct loop *loop, edge *exit)
/* Return true if loop is known to have bounded number of iterations. */
bool
-finite_loop_p (struct loop *loop)
+finite_loop_p (class loop *loop)
{
widest_int nit;
int flags;
@@ -2870,7 +2870,7 @@ finite_loop_p (struct loop *loop)
operands are constants. */
static gphi *
-chain_of_csts_start (struct loop *loop, tree x)
+chain_of_csts_start (class loop *loop, tree x)
{
gimple *stmt = SSA_NAME_DEF_STMT (x);
tree use;
@@ -2919,7 +2919,7 @@ chain_of_csts_start (struct loop *loop, tree x)
If such phi node exists, it is returned, otherwise NULL is returned. */
static gphi *
-get_base_for (struct loop *loop, tree x)
+get_base_for (class loop *loop, tree x)
{
gphi *phi;
tree init, next;
@@ -3007,7 +3007,7 @@ get_val_for (tree x, tree base)
of the iterations of LOOP if successful, chrec_dont_know otherwise. */
tree
-loop_niter_by_eval (struct loop *loop, edge exit)
+loop_niter_by_eval (class loop *loop, edge exit)
{
tree acnd;
tree op[2], val[2], next[2], aval[2];
@@ -3108,7 +3108,7 @@ loop_niter_by_eval (struct loop *loop, edge exit)
determines the number of iterations, chrec_dont_know is returned. */
tree
-find_loop_niter_by_eval (struct loop *loop, edge *exit)
+find_loop_niter_by_eval (class loop *loop, edge *exit)
{
unsigned i;
vec<edge> exits = get_loop_exit_edges (loop);
@@ -3325,7 +3325,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* Emit a -Waggressive-loop-optimizations warning if needed. */
static void
-do_warn_aggressive_loop_optimizations (struct loop *loop,
+do_warn_aggressive_loop_optimizations (class loop *loop,
widest_int i_bound, gimple *stmt)
{
/* Don't warn if the loop doesn't have known constant bound. */
@@ -3367,7 +3367,7 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
static void
-record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
+record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
gimple *at_stmt, bool is_exit, bool realistic, bool upper)
{
widest_int delta;
@@ -3399,7 +3399,7 @@ record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
|| loop->nb_iterations == NULL_TREE
|| TREE_CODE (loop->nb_iterations) != INTEGER_CST))
{
- struct nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
+ class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
elt->bound = i_bound;
elt->stmt = at_stmt;
@@ -3436,7 +3436,7 @@ record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
and doesn't overflow. */
static void
-record_control_iv (struct loop *loop, struct tree_niter_desc *niter)
+record_control_iv (class loop *loop, class tree_niter_desc *niter)
{
struct control_iv *iv;
@@ -3470,7 +3470,7 @@ get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
return false;
gimple *def_stmt = SSA_NAME_DEF_STMT (var);
- struct loop *loop = loop_containing_stmt (def_stmt);
+ class loop *loop = loop_containing_stmt (def_stmt);
if (loop == NULL)
return false;
@@ -3499,7 +3499,7 @@ get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
UPPER is true if we are sure the induction variable does not wrap. */
static void
-record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
+record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
tree low, tree high, bool realistic, bool upper)
{
tree niter_bound, extreme, delta;
@@ -3576,7 +3576,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple *stmt,
struct ilb_data
{
- struct loop *loop;
+ class loop *loop;
gimple *stmt;
};
@@ -3587,7 +3587,7 @@ idx_infer_loop_bounds (tree base, tree *idx, void *dta)
tree ev, init, step;
tree low, high, type, next;
bool sign, upper = true, at_end = false;
- struct loop *loop = data->loop;
+ class loop *loop = data->loop;
if (TREE_CODE (base) != ARRAY_REF)
return true;
@@ -3601,7 +3601,7 @@ idx_infer_loop_bounds (tree base, tree *idx, void *dta)
upper = false;
}
- struct loop *dloop = loop_containing_stmt (data->stmt);
+ class loop *dloop = loop_containing_stmt (data->stmt);
if (!dloop)
return true;
@@ -3676,7 +3676,7 @@ idx_infer_loop_bounds (tree base, tree *idx, void *dta)
STMT is guaranteed to be executed in every iteration of LOOP.*/
static void
-infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
+infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
{
struct ilb_data data;
@@ -3690,7 +3690,7 @@ infer_loop_bounds_from_ref (struct loop *loop, gimple *stmt, tree ref)
executed in every iteration of LOOP. */
static void
-infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
+infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
{
if (is_gimple_assign (stmt))
{
@@ -3727,7 +3727,7 @@ infer_loop_bounds_from_array (struct loop *loop, gimple *stmt)
that pointer arithmetics in STMT does not overflow. */
static void
-infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
+infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
{
tree def, base, step, scev, type, low, high;
tree var, ptr;
@@ -3752,7 +3752,7 @@ infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
return;
- struct loop *uloop = loop_containing_stmt (stmt);
+ class loop *uloop = loop_containing_stmt (stmt);
scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
if (chrec_contains_undetermined (scev))
return;
@@ -3786,7 +3786,7 @@ infer_loop_bounds_from_pointer_arith (struct loop *loop, gimple *stmt)
that signed arithmetics in STMT does not overflow. */
static void
-infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
+infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
{
tree def, base, step, scev, type, low, high;
@@ -3838,7 +3838,7 @@ infer_loop_bounds_from_signedness (struct loop *loop, gimple *stmt)
*/
static void
-infer_loop_bounds_from_undefined (struct loop *loop)
+infer_loop_bounds_from_undefined (class loop *loop)
{
unsigned i;
basic_block *bbs;
@@ -3918,9 +3918,9 @@ bound_index (vec<widest_int> bounds, const widest_int &bound)
some bounded statement. */
static void
-discover_iteration_bound_by_body_walk (struct loop *loop)
+discover_iteration_bound_by_body_walk (class loop *loop)
{
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
auto_vec<widest_int> bounds;
vec<vec<basic_block> > queues = vNULL;
vec<basic_block> queue = vNULL;
@@ -4083,10 +4083,10 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
count by 1. */
static void
-maybe_lower_iteration_bound (struct loop *loop)
+maybe_lower_iteration_bound (class loop *loop)
{
hash_set<gimple *> *not_executed_last_iteration = NULL;
- struct nb_iter_bound *elt;
+ class nb_iter_bound *elt;
bool found_exit = false;
auto_vec<basic_block> queue;
bitmap visited;
@@ -4236,12 +4236,12 @@ get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
is true also use estimates derived from undefined behavior. */
void
-estimate_numbers_of_iterations (struct loop *loop)
+estimate_numbers_of_iterations (class loop *loop)
{
vec<edge> exits;
tree niter, type;
unsigned i;
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
edge ex;
widest_int bound;
edge likely_exit;
@@ -4337,7 +4337,7 @@ estimate_numbers_of_iterations (struct loop *loop)
the function returns false, otherwise returns true. */
bool
-estimated_loop_iterations (struct loop *loop, widest_int *nit)
+estimated_loop_iterations (class loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -4352,7 +4352,7 @@ estimated_loop_iterations (struct loop *loop, widest_int *nit)
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-estimated_loop_iterations_int (struct loop *loop)
+estimated_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
@@ -4373,7 +4373,7 @@ estimated_loop_iterations_int (struct loop *loop)
false, otherwise returns true. */
bool
-max_loop_iterations (struct loop *loop, widest_int *nit)
+max_loop_iterations (class loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -4388,7 +4388,7 @@ max_loop_iterations (struct loop *loop, widest_int *nit)
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-max_loop_iterations_int (struct loop *loop)
+max_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
@@ -4408,7 +4408,7 @@ max_loop_iterations_int (struct loop *loop)
false, otherwise returns true. */
bool
-likely_max_loop_iterations (struct loop *loop, widest_int *nit)
+likely_max_loop_iterations (class loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -4423,7 +4423,7 @@ likely_max_loop_iterations (struct loop *loop, widest_int *nit)
on the number of iterations of LOOP could not be derived, returns -1. */
HOST_WIDE_INT
-likely_max_loop_iterations_int (struct loop *loop)
+likely_max_loop_iterations_int (class loop *loop)
{
widest_int nit;
HOST_WIDE_INT hwi_nit;
@@ -4443,7 +4443,7 @@ likely_max_loop_iterations_int (struct loop *loop)
the number of execution of the latch by one. */
HOST_WIDE_INT
-estimated_stmt_executions_int (struct loop *loop)
+estimated_stmt_executions_int (class loop *loop)
{
HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
HOST_WIDE_INT snit;
@@ -4462,7 +4462,7 @@ estimated_stmt_executions_int (struct loop *loop)
false, otherwise returns true. */
bool
-max_stmt_executions (struct loop *loop, widest_int *nit)
+max_stmt_executions (class loop *loop, widest_int *nit)
{
widest_int nit_minus_one;
@@ -4481,7 +4481,7 @@ max_stmt_executions (struct loop *loop, widest_int *nit)
false, otherwise returns true. */
bool
-likely_max_stmt_executions (struct loop *loop, widest_int *nit)
+likely_max_stmt_executions (class loop *loop, widest_int *nit)
{
widest_int nit_minus_one;
@@ -4500,7 +4500,7 @@ likely_max_stmt_executions (struct loop *loop, widest_int *nit)
false, otherwise returns true. */
bool
-estimated_stmt_executions (struct loop *loop, widest_int *nit)
+estimated_stmt_executions (class loop *loop, widest_int *nit)
{
widest_int nit_minus_one;
@@ -4519,7 +4519,7 @@ estimated_stmt_executions (struct loop *loop, widest_int *nit)
void
estimate_numbers_of_iterations (function *fn)
{
- struct loop *loop;
+ class loop *loop;
/* We don't want to issue signed overflow warnings while getting
loop iteration estimates. */
@@ -4577,7 +4577,7 @@ stmt_dominates_stmt_p (gimple *s1, gimple *s2)
static bool
n_of_executions_at_most (gimple *stmt,
- struct nb_iter_bound *niter_bound,
+ class nb_iter_bound *niter_bound,
tree niter)
{
widest_int bound = niter_bound->bound;
@@ -4664,11 +4664,11 @@ nowrap_type_p (tree type)
static bool
loop_exits_before_overflow (tree base, tree step,
- gimple *at_stmt, struct loop *loop)
+ gimple *at_stmt, class loop *loop)
{
widest_int niter;
struct control_iv *civ;
- struct nb_iter_bound *bound;
+ class nb_iter_bound *bound;
tree e, delta, step_abs, unsigned_base;
tree type = TREE_TYPE (step);
tree unsigned_type, valid_niter;
@@ -4856,7 +4856,7 @@ loop_exits_before_overflow (tree base, tree step,
(4294967295, 4294967296, ...). */
static bool
-scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
+scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
{
tree type;
wide_int minv, maxv, diff, step_wi;
@@ -4910,7 +4910,7 @@ scev_var_range_cant_overflow (tree var, tree step, struct loop *loop)
bool
scev_probably_wraps_p (tree var, tree base, tree step,
- gimple *at_stmt, struct loop *loop,
+ gimple *at_stmt, class loop *loop,
bool use_overflow_semantics)
{
/* FIXME: We really need something like
@@ -4962,16 +4962,16 @@ scev_probably_wraps_p (tree var, tree base, tree step,
/* Frees the information on upper bounds on numbers of iterations of LOOP. */
void
-free_numbers_of_iterations_estimates (struct loop *loop)
+free_numbers_of_iterations_estimates (class loop *loop)
{
struct control_iv *civ;
- struct nb_iter_bound *bound;
+ class nb_iter_bound *bound;
loop->nb_iterations = NULL;
loop->estimate_state = EST_NOT_COMPUTED;
for (bound = loop->bounds; bound;)
{
- struct nb_iter_bound *next = bound->next;
+ class nb_iter_bound *next = bound->next;
ggc_free (bound);
bound = next;
}
@@ -4991,7 +4991,7 @@ free_numbers_of_iterations_estimates (struct loop *loop)
void
free_numbers_of_iterations_estimates (function *fn)
{
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP_FN (fn, loop, 0)
free_numbers_of_iterations_estimates (loop);
@@ -5001,7 +5001,7 @@ free_numbers_of_iterations_estimates (function *fn)
at LOOP. */
void
-substitute_in_loop_info (struct loop *loop, tree name, tree val)
+substitute_in_loop_info (class loop *loop, tree name, tree val)
{
loop->nb_iterations = simplify_replace_tree (loop->nb_iterations, name, val);
}
diff --git a/gcc/tree-ssa-loop-niter.h b/gcc/tree-ssa-loop-niter.h
index dc116489218..4454c1ac78e 100644
--- a/gcc/tree-ssa-loop-niter.h
+++ b/gcc/tree-ssa-loop-niter.h
@@ -21,39 +21,39 @@ along with GCC; see the file COPYING3. If not see
#define GCC_TREE_SSA_LOOP_NITER_H
extern tree expand_simple_operations (tree, tree = NULL);
-extern tree simplify_using_initial_conditions (struct loop *, tree);
-extern bool loop_only_exit_p (const struct loop *, const_edge);
-extern bool number_of_iterations_exit (struct loop *, edge,
- struct tree_niter_desc *niter, bool,
+extern tree simplify_using_initial_conditions (class loop *, tree);
+extern bool loop_only_exit_p (const class loop *, const_edge);
+extern bool number_of_iterations_exit (class loop *, edge,
+ class tree_niter_desc *niter, bool,
bool every_iteration = true);
-extern bool number_of_iterations_exit_assumptions (struct loop *, edge,
- struct tree_niter_desc *,
+extern bool number_of_iterations_exit_assumptions (class loop *, edge,
+ class tree_niter_desc *,
gcond **, bool = true);
-extern tree find_loop_niter (struct loop *, edge *);
-extern bool finite_loop_p (struct loop *);
-extern tree loop_niter_by_eval (struct loop *, edge);
-extern tree find_loop_niter_by_eval (struct loop *, edge *);
-extern bool estimated_loop_iterations (struct loop *, widest_int *);
-extern HOST_WIDE_INT estimated_loop_iterations_int (struct loop *);
-extern bool max_loop_iterations (struct loop *, widest_int *);
-extern HOST_WIDE_INT max_loop_iterations_int (struct loop *);
-extern bool likely_max_loop_iterations (struct loop *, widest_int *);
-extern HOST_WIDE_INT likely_max_loop_iterations_int (struct loop *);
-extern HOST_WIDE_INT max_stmt_executions_int (struct loop *);
-extern HOST_WIDE_INT likely_max_stmt_executions_int (struct loop *);
-extern HOST_WIDE_INT estimated_stmt_executions_int (struct loop *);
-extern bool max_stmt_executions (struct loop *, widest_int *);
-extern bool likely_max_stmt_executions (struct loop *, widest_int *);
-extern bool estimated_stmt_executions (struct loop *, widest_int *);
+extern tree find_loop_niter (class loop *, edge *);
+extern bool finite_loop_p (class loop *);
+extern tree loop_niter_by_eval (class loop *, edge);
+extern tree find_loop_niter_by_eval (class loop *, edge *);
+extern bool estimated_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT estimated_loop_iterations_int (class loop *);
+extern bool max_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT max_loop_iterations_int (class loop *);
+extern bool likely_max_loop_iterations (class loop *, widest_int *);
+extern HOST_WIDE_INT likely_max_loop_iterations_int (class loop *);
+extern HOST_WIDE_INT max_stmt_executions_int (class loop *);
+extern HOST_WIDE_INT likely_max_stmt_executions_int (class loop *);
+extern HOST_WIDE_INT estimated_stmt_executions_int (class loop *);
+extern bool max_stmt_executions (class loop *, widest_int *);
+extern bool likely_max_stmt_executions (class loop *, widest_int *);
+extern bool estimated_stmt_executions (class loop *, widest_int *);
extern void estimate_numbers_of_iterations (function *);
-extern void estimate_numbers_of_iterations (struct loop *);
+extern void estimate_numbers_of_iterations (class loop *);
extern bool stmt_dominates_stmt_p (gimple *, gimple *);
extern bool nowrap_type_p (tree);
extern bool scev_probably_wraps_p (tree, tree, tree, gimple *,
- struct loop *, bool);
-extern void free_numbers_of_iterations_estimates (struct loop *);
+ class loop *, bool);
+extern void free_numbers_of_iterations_estimates (class loop *);
extern void free_numbers_of_iterations_estimates (function *);
extern tree simplify_replace_tree (tree, tree, tree, tree (*)(tree) = NULL);
-extern void substitute_in_loop_info (struct loop *, tree, tree);
+extern void substitute_in_loop_info (class loop *, tree, tree);
#endif /* GCC_TREE_SSA_LOOP_NITER_H */
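[Another editorial aside, not from the patch: in C++ the choice of struct versus class in a forward declaration or elaborated-type-specifier does not change the type being named, so header hunks like the one above are behavior-preserving -- only the mismatch diagnostic goes away. A small sketch under that assumption, with made-up names:]

// keys.cc -- illustrative only; 'demo_loop' and 'depth' are stand-ins.
#include <type_traits>

class demo_loop                      // defined with the 'class' key
{
public:
  int num;
};

int
depth (struct demo_loop *l)          // 'struct' here still names the very same
{                                    // type; this is the cosmetic mismatch the
  return l ? l->num : -1;            // patch removes from GCC's own headers
}

static_assert (std::is_class<demo_loop>::value,
               "either class-key refers to the one class type demo_loop");

int
main ()
{
  demo_loop l { 3 };
  return depth (&l) == 3 ? 0 : 1;
}

[End of aside; the patch resumes below.]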
diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c
index 7afd85f07d5..04ff5244b69 100644
--- a/gcc/tree-ssa-loop-prefetch.c
+++ b/gcc/tree-ssa-loop-prefetch.c
@@ -421,7 +421,7 @@ release_mem_refs (struct mem_ref_group *groups)
struct ar_data
{
- struct loop *loop; /* Loop of the reference. */
+ class loop *loop; /* Loop of the reference. */
gimple *stmt; /* Statement of the reference. */
tree *step; /* Step of the memory reference. */
HOST_WIDE_INT *delta; /* Offset of the memory reference. */
@@ -486,7 +486,7 @@ idx_analyze_ref (tree base, tree *index, void *data)
references from REF_P. */
static bool
-analyze_ref (struct loop *loop, tree *ref_p, tree *base,
+analyze_ref (class loop *loop, tree *ref_p, tree *base,
tree *step, HOST_WIDE_INT *delta,
gimple *stmt)
{
@@ -535,7 +535,7 @@ analyze_ref (struct loop *loop, tree *ref_p, tree *base,
reference was recorded, false otherwise. */
static bool
-gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
+gather_memory_references_ref (class loop *loop, struct mem_ref_group **refs,
tree ref, bool write_p, gimple *stmt)
{
tree base, step;
@@ -606,7 +606,7 @@ gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs,
true if there are no other memory references inside the loop. */
static struct mem_ref_group *
-gather_memory_references (struct loop *loop, bool *no_other_refs, unsigned *ref_count)
+gather_memory_references (class loop *loop, bool *no_other_refs, unsigned *ref_count)
{
basic_block *body = get_loop_body_in_dom_order (loop);
basic_block bb;
@@ -1286,7 +1286,7 @@ mark_nontemporal_store (struct mem_ref *ref)
/* Issue a memory fence instruction after LOOP. */
static void
-emit_mfence_after_loop (struct loop *loop)
+emit_mfence_after_loop (class loop *loop)
{
vec<edge> exits = get_loop_exit_edges (loop);
edge exit;
@@ -1315,7 +1315,7 @@ emit_mfence_after_loop (struct loop *loop)
/* Returns true if we can use storent in loop, false otherwise. */
static bool
-may_use_storent_in_loop_p (struct loop *loop)
+may_use_storent_in_loop_p (class loop *loop)
{
bool ret = true;
@@ -1345,7 +1345,7 @@ may_use_storent_in_loop_p (struct loop *loop)
references in the loop. */
static void
-mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
+mark_nontemporal_stores (class loop *loop, struct mem_ref_group *groups)
{
struct mem_ref *ref;
bool any = false;
@@ -1366,7 +1366,7 @@ mark_nontemporal_stores (struct loop *loop, struct mem_ref_group *groups)
iterations. */
static bool
-should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
+should_unroll_loop_p (class loop *loop, class tree_niter_desc *desc,
unsigned factor)
{
if (!can_unroll_loop_p (loop, factor, desc))
@@ -1390,8 +1390,8 @@ should_unroll_loop_p (struct loop *loop, struct tree_niter_desc *desc,
the loop, or -1 if no estimate is available. */
static unsigned
-determine_unroll_factor (struct loop *loop, struct mem_ref_group *refs,
- unsigned ninsns, struct tree_niter_desc *desc,
+determine_unroll_factor (class loop *loop, struct mem_ref_group *refs,
+ unsigned ninsns, class tree_niter_desc *desc,
HOST_WIDE_INT est_niter)
{
unsigned upper_bound;
@@ -1493,9 +1493,9 @@ volume_of_dist_vector (lambda_vector vec, unsigned *loop_sizes, unsigned n)
static void
add_subscript_strides (tree access_fn, unsigned stride,
- HOST_WIDE_INT *strides, unsigned n, struct loop *loop)
+ HOST_WIDE_INT *strides, unsigned n, class loop *loop)
{
- struct loop *aloop;
+ class loop *aloop;
tree step;
HOST_WIDE_INT astep;
unsigned min_depth = loop_depth (loop) - n;
@@ -1526,7 +1526,7 @@ add_subscript_strides (tree access_fn, unsigned stride,
static unsigned
self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
- struct loop *loop)
+ class loop *loop)
{
tree stride, access_fn;
HOST_WIDE_INT *strides, astride;
@@ -1596,10 +1596,10 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n,
memory references in the loop. Return false if the analysis fails. */
static bool
-determine_loop_nest_reuse (struct loop *loop, struct mem_ref_group *refs,
+determine_loop_nest_reuse (class loop *loop, struct mem_ref_group *refs,
bool no_other_refs)
{
- struct loop *nest, *aloop;
+ class loop *nest, *aloop;
vec<data_reference_p> datarefs = vNULL;
vec<ddr_p> dependences = vNULL;
struct mem_ref_group *gr;
@@ -1879,12 +1879,12 @@ insn_to_prefetch_ratio_too_small_p (unsigned ninsns, unsigned prefetch_count,
true if the LOOP was unrolled. */
static bool
-loop_prefetch_arrays (struct loop *loop)
+loop_prefetch_arrays (class loop *loop)
{
struct mem_ref_group *refs;
unsigned ahead, ninsns, time, unroll_factor;
HOST_WIDE_INT est_niter;
- struct tree_niter_desc desc;
+ class tree_niter_desc desc;
bool unrolled = false, no_other_refs;
unsigned prefetch_count;
unsigned mem_ref_count;
@@ -1982,7 +1982,7 @@ fail:
unsigned int
tree_ssa_prefetch_arrays (void)
{
- struct loop *loop;
+ class loop *loop;
bool unrolled = false;
int todo_flags = 0;
diff --git a/gcc/tree-ssa-loop-split.c b/gcc/tree-ssa-loop-split.c
index 999c9a30366..f5f083384bc 100644
--- a/gcc/tree-ssa-loop-split.c
+++ b/gcc/tree-ssa-loop-split.c
@@ -70,7 +70,7 @@ along with GCC; see the file COPYING3. If not see
point in *BORDER and the comparison induction variable in IV. */
static tree
-split_at_bb_p (struct loop *loop, basic_block bb, tree *border, affine_iv *iv)
+split_at_bb_p (class loop *loop, basic_block bb, tree *border, affine_iv *iv)
{
gimple *last;
gcond *stmt;
@@ -102,7 +102,7 @@ split_at_bb_p (struct loop *loop, basic_block bb, tree *border, affine_iv *iv)
tree op0 = gimple_cond_lhs (stmt);
tree op1 = gimple_cond_rhs (stmt);
- struct loop *useloop = loop_containing_stmt (stmt);
+ class loop *useloop = loop_containing_stmt (stmt);
if (!simple_iv (loop, useloop, op0, iv, false))
return NULL_TREE;
@@ -150,7 +150,7 @@ split_at_bb_p (struct loop *loop, basic_block bb, tree *border, affine_iv *iv)
also be true/false in the next iteration. */
static void
-patch_loop_exit (struct loop *loop, gcond *guard, tree nextval, tree newbound,
+patch_loop_exit (class loop *loop, gcond *guard, tree nextval, tree newbound,
bool initial_true)
{
edge exit = single_exit (loop);
@@ -181,7 +181,7 @@ patch_loop_exit (struct loop *loop, gcond *guard, tree nextval, tree newbound,
such phi node. Return that phi node. */
static gphi *
-find_or_create_guard_phi (struct loop *loop, tree guard_iv, affine_iv * /*iv*/)
+find_or_create_guard_phi (class loop *loop, tree guard_iv, affine_iv * /*iv*/)
{
gimple *def = SSA_NAME_DEF_STMT (guard_iv);
gphi *phi;
@@ -197,7 +197,7 @@ find_or_create_guard_phi (struct loop *loop, tree guard_iv, affine_iv * /*iv*/)
determined easily (i.e. that connect_loop_phis can determine them). */
static bool
-easy_exit_values (struct loop *loop)
+easy_exit_values (class loop *loop)
{
edge exit = single_exit (loop);
edge latch = loop_latch_edge (loop);
@@ -229,7 +229,7 @@ easy_exit_values (struct loop *loop)
this. The loops need to fulfill easy_exit_values(). */
static void
-connect_loop_phis (struct loop *loop1, struct loop *loop2, edge new_e)
+connect_loop_phis (class loop *loop1, class loop *loop2, edge new_e)
{
basic_block rest = loop_preheader_edge (loop2)->src;
gcc_assert (new_e->dest == rest);
@@ -323,7 +323,7 @@ connect_loop_phis (struct loop *loop1, struct loop *loop2, edge new_e)
This doesn't update the SSA form, see connect_loop_phis for that. */
static edge
-connect_loops (struct loop *loop1, struct loop *loop2)
+connect_loops (class loop *loop1, class loop *loop2)
{
edge exit = single_exit (loop1);
basic_block skip_bb = split_edge (exit);
@@ -387,7 +387,7 @@ connect_loops (struct loop *loop1, struct loop *loop2)
and add or subtract 1. This routine computes newend above. */
static tree
-compute_new_first_bound (gimple_seq *stmts, struct tree_niter_desc *niter,
+compute_new_first_bound (gimple_seq *stmts, class tree_niter_desc *niter,
tree border,
enum tree_code guard_code, tree guard_init)
{
@@ -487,7 +487,7 @@ compute_new_first_bound (gimple_seq *stmts, struct tree_niter_desc *niter,
single exit of LOOP. */
static bool
-split_loop (struct loop *loop1, struct tree_niter_desc *niter)
+split_loop (class loop *loop1, class tree_niter_desc *niter)
{
basic_block *bbs;
unsigned i;
@@ -557,7 +557,7 @@ split_loop (struct loop *loop1, struct tree_niter_desc *niter)
initialize_original_copy_tables ();
basic_block cond_bb;
- struct loop *loop2 = loop_version (loop1, cond, &cond_bb,
+ class loop *loop2 = loop_version (loop1, cond, &cond_bb,
profile_probability::always (),
profile_probability::always (),
profile_probability::always (),
@@ -617,7 +617,7 @@ split_loop (struct loop *loop1, struct tree_niter_desc *niter)
static unsigned int
tree_ssa_split_loops (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
gcc_assert (scev_initialized_p ());
@@ -627,7 +627,7 @@ tree_ssa_split_loops (void)
/* Go through all loops starting from innermost. */
FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
{
- struct tree_niter_desc niter;
+ class tree_niter_desc niter;
if (loop->aux)
{
/* If any of our inner loops was split, don't split us,
diff --git a/gcc/tree-ssa-loop-unswitch.c b/gcc/tree-ssa-loop-unswitch.c
index 30a2a9dd26e..e60019db946 100644
--- a/gcc/tree-ssa-loop-unswitch.c
+++ b/gcc/tree-ssa-loop-unswitch.c
@@ -75,23 +75,23 @@ along with GCC; see the file COPYING3. If not see
tree-ssa-loop-im.c ensures that all the suitable conditions are in this
shape. */
-static struct loop *tree_unswitch_loop (struct loop *, basic_block, tree);
-static bool tree_unswitch_single_loop (struct loop *, int);
-static tree tree_may_unswitch_on (basic_block, struct loop *);
-static bool tree_unswitch_outer_loop (struct loop *);
-static edge find_loop_guard (struct loop *);
-static bool empty_bb_without_guard_p (struct loop *, basic_block);
-static bool used_outside_loop_p (struct loop *, tree);
-static void hoist_guard (struct loop *, edge);
-static bool check_exit_phi (struct loop *);
-static tree get_vop_from_header (struct loop *);
+static class loop *tree_unswitch_loop (class loop *, basic_block, tree);
+static bool tree_unswitch_single_loop (class loop *, int);
+static tree tree_may_unswitch_on (basic_block, class loop *);
+static bool tree_unswitch_outer_loop (class loop *);
+static edge find_loop_guard (class loop *);
+static bool empty_bb_without_guard_p (class loop *, basic_block);
+static bool used_outside_loop_p (class loop *, tree);
+static void hoist_guard (class loop *, edge);
+static bool check_exit_phi (class loop *);
+static tree get_vop_from_header (class loop *);
/* Main entry point. Perform loop unswitching on all suitable loops. */
unsigned int
tree_ssa_unswitch_loops (void)
{
- struct loop *loop;
+ class loop *loop;
bool changed = false;
/* Go through all loops starting from innermost. */
@@ -114,7 +114,7 @@ tree_ssa_unswitch_loops (void)
considering for unswitching and LOOP is the loop it appears in. */
static bool
-is_maybe_undefined (const tree name, gimple *stmt, struct loop *loop)
+is_maybe_undefined (const tree name, gimple *stmt, class loop *loop)
{
/* The loop header is the only block we can trivially determine that
will always be executed. If the comparison is in the loop
@@ -187,7 +187,7 @@ is_maybe_undefined (const tree name, gimple *stmt, struct loop *loop)
basic blocks (for what it means see comments below). */
static tree
-tree_may_unswitch_on (basic_block bb, struct loop *loop)
+tree_may_unswitch_on (basic_block bb, class loop *loop)
{
gimple *last, *def;
gcond *stmt;
@@ -232,7 +232,7 @@ tree_may_unswitch_on (basic_block bb, struct loop *loop)
unnecessarily). */
static tree
-simplify_using_entry_checks (struct loop *loop, tree cond)
+simplify_using_entry_checks (class loop *loop, tree cond)
{
edge e = loop_preheader_edge (loop);
gimple *stmt;
@@ -265,10 +265,10 @@ simplify_using_entry_checks (struct loop *loop, tree cond)
grow exponentially. */
static bool
-tree_unswitch_single_loop (struct loop *loop, int num)
+tree_unswitch_single_loop (class loop *loop, int num)
{
basic_block *bbs;
- struct loop *nloop;
+ class loop *nloop;
unsigned i, found;
tree cond = NULL_TREE;
gimple *stmt;
@@ -476,8 +476,8 @@ tree_unswitch_single_loop (struct loop *loop, int num)
loop is entered -- the new loop is entered if COND is true. Returns NULL
if impossible, new loop otherwise. */
-static struct loop *
-tree_unswitch_loop (struct loop *loop,
+static class loop *
+tree_unswitch_loop (class loop *loop,
basic_block unswitch_on, tree cond)
{
profile_probability prob_true;
@@ -500,7 +500,7 @@ tree_unswitch_loop (struct loop *loop,
/* Unswitch outer loops by hoisting invariant guard on
inner loop without code duplication. */
static bool
-tree_unswitch_outer_loop (struct loop *loop)
+tree_unswitch_outer_loop (class loop *loop)
{
edge exit, guard;
HOST_WIDE_INT iterations;
@@ -544,7 +544,7 @@ tree_unswitch_outer_loop (struct loop *loop)
otherwise returns NULL. */
static edge
-find_loop_guard (struct loop *loop)
+find_loop_guard (class loop *loop)
{
basic_block header = loop->header;
edge guard_edge, te, fe;
@@ -701,7 +701,7 @@ end:
are invariant or not. */
static bool
-empty_bb_without_guard_p (struct loop *loop, basic_block bb)
+empty_bb_without_guard_p (class loop *loop, basic_block bb)
{
basic_block exit_bb = single_exit (loop)->src;
bool may_be_used_outside = (bb == exit_bb
@@ -749,7 +749,7 @@ empty_bb_without_guard_p (struct loop *loop, basic_block bb)
/* Return true if NAME is used outside of LOOP. */
static bool
-used_outside_loop_p (struct loop *loop, tree name)
+used_outside_loop_p (class loop *loop, tree name)
{
imm_use_iterator it;
use_operand_p use;
@@ -767,7 +767,7 @@ used_outside_loop_p (struct loop *loop, tree name)
/* Return argument for loop preheader edge in header virtual phi if any. */
static tree
-get_vop_from_header (struct loop *loop)
+get_vop_from_header (class loop *loop)
{
for (gphi_iterator gsi = gsi_start_phis (loop->header);
!gsi_end_p (gsi); gsi_next (&gsi))
@@ -783,7 +783,7 @@ get_vop_from_header (struct loop *loop)
/* Move the check of GUARD outside of LOOP. */
static void
-hoist_guard (struct loop *loop, edge guard)
+hoist_guard (class loop *loop, edge guard)
{
edge exit = single_exit (loop);
edge preh = loop_preheader_edge (loop);
@@ -934,7 +934,7 @@ hoist_guard (struct loop *loop, edge guard)
for edge around loop. */
static bool
-check_exit_phi (struct loop *loop)
+check_exit_phi (class loop *loop)
{
edge exit = single_exit (loop);
basic_block pre_header = loop_preheader_edge (loop)->src;
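The tree-ssa-loop-unswitch.c hunks above only swap the class-key used to refer to the (class-type) loop; none of the unswitching logic changes. As a stand-alone illustration of the kind of diagnostic this cleanup targets, the following minimal sketch (hypothetical type, not GCC code) is valid C++ either way, but a compiler that implements a -Wmismatched-tags style warning flags a redeclaration whose class-key disagrees with the definition's:

/* mismatched-tags-sketch.cc -- hypothetical illustration, not part of this patch.  */
class loop { public: int num_nodes; };

/* Writing 'struct loop *' here would still compile, but it is exactly what
   -Wmismatched-tags style diagnostics report; keeping the key consistent
   with the definition (here: 'class') keeps such builds warning-free.  */
class loop *
make_loop (void)
{
  return new loop ();
}

int
main (void)
{
  loop *l = make_loop ();
  int n = (l->num_nodes = 0);
  delete l;
  return n;
}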
diff --git a/gcc/tree-ssa-loop.c b/gcc/tree-ssa-loop.c
index 208c58354b4..fc9f08363ce 100644
--- a/gcc/tree-ssa-loop.c
+++ b/gcc/tree-ssa-loop.c
@@ -157,7 +157,7 @@ gate_oacc_kernels (function *fn)
if (!lookup_attribute ("oacc kernels", DECL_ATTRIBUTES (fn->decl)))
return false;
- struct loop *loop;
+ class loop *loop;
FOR_EACH_LOOP (loop, 0)
if (loop->in_oacc_kernels_region)
return true;
@@ -455,7 +455,7 @@ public:
unsigned
pass_scev_cprop::execute (function *)
{
- struct loop *loop;
+ class loop *loop;
bool any = false;
/* Perform final value replacement in loops, in case the replacement
@@ -776,7 +776,7 @@ get_lsm_tmp_name (tree ref, unsigned n, const char *suffix)
/* Computes an estimated number of insns in LOOP, weighted by WEIGHTS. */
unsigned
-tree_num_loop_insns (struct loop *loop, eni_weights *weights)
+tree_num_loop_insns (class loop *loop, eni_weights *weights)
{
basic_block *body = get_loop_body (loop);
gimple_stmt_iterator gsi;
diff --git a/gcc/tree-ssa-loop.h b/gcc/tree-ssa-loop.h
index 24fad0f3e10..e523de21380 100644
--- a/gcc/tree-ssa-loop.h
+++ b/gcc/tree-ssa-loop.h
@@ -66,11 +66,11 @@ public:
extern bool for_each_index (tree *, bool (*) (tree, tree *, void *), void *);
extern char *get_lsm_tmp_name (tree ref, unsigned n, const char *suffix = NULL);
-extern unsigned tree_num_loop_insns (struct loop *, struct eni_weights *);
+extern unsigned tree_num_loop_insns (class loop *, struct eni_weights *);
/* Returns the loop of the statement STMT. */
-static inline struct loop *
+static inline class loop *
loop_containing_stmt (gimple *stmt)
{
basic_block bb = gimple_bb (stmt);
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 9c1a9a651fe..635fc937617 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -270,7 +270,7 @@ static long
phi_rank (gimple *stmt)
{
basic_block bb = gimple_bb (stmt);
- struct loop *father = bb->loop_father;
+ class loop *father = bb->loop_father;
tree res;
unsigned i;
use_operand_p use;
@@ -603,7 +603,7 @@ add_repeat_to_ops_vec (vec<operand_entry *> *ops, tree op,
operation with tree code CODE, and is inside LOOP. */
static bool
-is_reassociable_op (gimple *stmt, enum tree_code code, struct loop *loop)
+is_reassociable_op (gimple *stmt, enum tree_code code, class loop *loop)
{
basic_block bb = gimple_bb (stmt);
@@ -1560,7 +1560,7 @@ build_and_add_sum (tree type, tree op1, tree op2, enum tree_code opcode)
static bool
undistribute_ops_list (enum tree_code opcode,
- vec<operand_entry *> *ops, struct loop *loop)
+ vec<operand_entry *> *ops, class loop *loop)
{
unsigned int length = ops->length ();
operand_entry *oe1;
@@ -3861,7 +3861,7 @@ no_side_effect_bb (basic_block bb)
static bool
get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
- struct loop *loop)
+ class loop *loop)
{
gimple *stmt = SSA_NAME_DEF_STMT (var);
tree rhs[2];
@@ -3896,7 +3896,7 @@ get_ops (tree var, enum tree_code code, vec<operand_entry *> *ops,
static tree
update_ops (tree var, enum tree_code code, vec<operand_entry *> ops,
- unsigned int *pidx, struct loop *loop)
+ unsigned int *pidx, class loop *loop)
{
gimple *stmt = SSA_NAME_DEF_STMT (var);
tree rhs[4];
@@ -4834,7 +4834,7 @@ linearize_expr (gimple *stmt)
gimple *oldbinrhs = binrhs;
enum tree_code rhscode = gimple_assign_rhs_code (stmt);
gimple *newbinrhs = NULL;
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
tree lhs = gimple_assign_lhs (stmt);
gcc_assert (is_reassociable_op (binlhs, rhscode, loop)
@@ -4968,7 +4968,7 @@ should_break_up_subtract (gimple *stmt)
tree binlhs = gimple_assign_rhs1 (stmt);
tree binrhs = gimple_assign_rhs2 (stmt);
gimple *immusestmt;
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
if (TREE_CODE (binlhs) == SSA_NAME
&& is_reassociable_op (SSA_NAME_DEF_STMT (binlhs), PLUS_EXPR, loop))
@@ -5123,7 +5123,7 @@ linearize_expr_tree (vec<operand_entry *> *ops, gimple *stmt,
bool binlhsisreassoc = false;
bool binrhsisreassoc = false;
enum tree_code rhscode = gimple_assign_rhs_code (stmt);
- struct loop *loop = loop_containing_stmt (stmt);
+ class loop *loop = loop_containing_stmt (stmt);
if (set_visited)
gimple_set_visited (stmt, true);
diff --git a/gcc/tree-ssa-scopedtables.c b/gcc/tree-ssa-scopedtables.c
index 838cf8fa31d..574bc30eee1 100644
--- a/gcc/tree-ssa-scopedtables.c
+++ b/gcc/tree-ssa-scopedtables.c
@@ -1028,9 +1028,9 @@ bool
expr_elt_hasher::equal (const value_type &p1, const compare_type &p2)
{
const struct hashable_expr *expr1 = p1->expr ();
- const struct expr_hash_elt *stamp1 = p1->stamp ();
+ const class expr_hash_elt *stamp1 = p1->stamp ();
const struct hashable_expr *expr2 = p2->expr ();
- const struct expr_hash_elt *stamp2 = p2->stamp ();
+ const class expr_hash_elt *stamp2 = p2->stamp ();
/* This case should apply only when removing entries from the table. */
if (stamp1 == stamp2)
diff --git a/gcc/tree-ssa-scopedtables.h b/gcc/tree-ssa-scopedtables.h
index 2328d1cc60e..48185006823 100644
--- a/gcc/tree-ssa-scopedtables.h
+++ b/gcc/tree-ssa-scopedtables.h
@@ -96,7 +96,7 @@ class expr_hash_elt
/* A unique stamp, typically the address of the hash
element itself, used in removing entries from the table. */
- struct expr_hash_elt *m_stamp;
+ class expr_hash_elt *m_stamp;
/* We should never be making assignments between objects in this class.
Though it might allow us to exploit C++11 move semantics if we
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index f470f31d295..f8962d618d6 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -1420,7 +1420,7 @@ public:
number 1, pages 9-14. */
static void
-scc_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
+scc_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
{
unsigned int i;
bitmap_iterator bi;
@@ -2023,7 +2023,7 @@ static int location_equiv_class;
and label it's nodes with DFS numbers. */
static void
-condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
+condense_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
{
unsigned int i;
bitmap_iterator bi;
@@ -2128,7 +2128,7 @@ condense_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
3. Hashable. */
static void
-label_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
+label_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
{
unsigned int i, first_pred;
bitmap_iterator bi;
@@ -2215,7 +2215,7 @@ label_visit (constraint_graph_t graph, struct scc_info *si, unsigned int n)
/* Print the pred graph in dot format. */
static void
-dump_pred_graph (struct scc_info *si, FILE *file)
+dump_pred_graph (class scc_info *si, FILE *file)
{
unsigned int i;
@@ -2290,7 +2290,7 @@ dump_pred_graph (struct scc_info *si, FILE *file)
/* Perform offline variable substitution, discovering equivalence
classes, and eliminating non-pointer variables. */
-static struct scc_info *
+static class scc_info *
perform_var_substitution (constraint_graph_t graph)
{
unsigned int i;
@@ -2424,7 +2424,7 @@ perform_var_substitution (constraint_graph_t graph)
substitution. */
static void
-free_var_substitution_info (struct scc_info *si)
+free_var_substitution_info (class scc_info *si)
{
delete si;
free (graph->pointer_label);
@@ -2548,7 +2548,7 @@ move_complex_constraints (constraint_graph_t graph)
static void
rewrite_constraints (constraint_graph_t graph,
- struct scc_info *si)
+ class scc_info *si)
{
int i;
constraint_t c;
@@ -7184,7 +7184,7 @@ remove_preds_and_fake_succs (constraint_graph_t graph)
static void
solve_constraints (void)
{
- struct scc_info *si;
+ class scc_info *si;
/* Sort varinfos so that ones that cannot be pointed to are last.
This makes bitmaps more efficient. */
diff --git a/gcc/tree-ssa-threadupdate.c b/gcc/tree-ssa-threadupdate.c
index a56ccfbaa8c..51a316a0066 100644
--- a/gcc/tree-ssa-threadupdate.c
+++ b/gcc/tree-ssa-threadupdate.c
@@ -1556,7 +1556,7 @@ dbds_continue_enumeration_p (const_basic_block bb, const void *stop)
returns the state. */
enum bb_dom_status
-determine_bb_domination_status (struct loop *loop, basic_block bb)
+determine_bb_domination_status (class loop *loop, basic_block bb)
{
basic_block *bblocks;
unsigned nblocks, i;
@@ -1614,7 +1614,7 @@ determine_bb_domination_status (struct loop *loop, basic_block bb)
to the inside of the loop. */
static bool
-thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
+thread_through_loop_header (class loop *loop, bool may_peel_loop_headers)
{
basic_block header = loop->header;
edge e, tgt_edge, latch = loop_latch_edge (loop);
@@ -2317,7 +2317,7 @@ duplicate_thread_path (edge entry, edge exit, basic_block *region,
unsigned n_region, unsigned current_path_no)
{
unsigned i;
- struct loop *loop = entry->dest->loop_father;
+ class loop *loop = entry->dest->loop_father;
edge exit_copy;
edge redirected;
profile_count curr_count;
@@ -2517,7 +2517,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
{
bool retval = false;
unsigned int i;
- struct loop *loop;
+ class loop *loop;
auto_bitmap threaded_blocks;
hash_set<edge> visited_starting_edges;
diff --git a/gcc/tree-ssa-threadupdate.h b/gcc/tree-ssa-threadupdate.h
index d66e8e00133..dc6de2f62b2 100644
--- a/gcc/tree-ssa-threadupdate.h
+++ b/gcc/tree-ssa-threadupdate.h
@@ -59,6 +59,6 @@ enum bb_dom_status
DOMST_DOMINATING
};
-enum bb_dom_status determine_bb_domination_status (struct loop *, basic_block);
+enum bb_dom_status determine_bb_domination_status (class loop *, basic_block);
#endif
diff --git a/gcc/tree-streamer-in.c b/gcc/tree-streamer-in.c
index 35b3e9874d5..dc8bbf81cc9 100644
--- a/gcc/tree-streamer-in.c
+++ b/gcc/tree-streamer-in.c
@@ -41,7 +41,7 @@ along with GCC; see the file COPYING3. If not see
block IB. */
tree
-streamer_read_string_cst (struct data_in *data_in, struct lto_input_block *ib)
+streamer_read_string_cst (class data_in *data_in, class lto_input_block *ib)
{
unsigned int len;
const char * ptr;
@@ -57,7 +57,7 @@ streamer_read_string_cst (struct data_in *data_in, struct lto_input_block *ib)
block IB. */
static tree
-input_identifier (struct data_in *data_in, struct lto_input_block *ib)
+input_identifier (class data_in *data_in, class lto_input_block *ib)
{
unsigned int len;
const char *ptr;
@@ -73,7 +73,7 @@ input_identifier (struct data_in *data_in, struct lto_input_block *ib)
tables and descriptors for the file being read. */
tree
-streamer_read_chain (struct lto_input_block *ib, struct data_in *data_in)
+streamer_read_chain (class lto_input_block *ib, class data_in *data_in)
{
tree first, prev, curr;
@@ -404,7 +404,7 @@ unpack_ts_type_common_value_fields (struct bitpack_d *bp, tree expr)
of expression EXPR from bitpack BP. */
static void
-unpack_ts_block_value_fields (struct data_in *data_in,
+unpack_ts_block_value_fields (class data_in *data_in,
struct bitpack_d *bp, tree expr)
{
/* BLOCK_NUMBER is recomputed. */
@@ -415,7 +415,7 @@ unpack_ts_block_value_fields (struct data_in *data_in,
structure of expression EXPR from bitpack BP. */
static void
-unpack_ts_translation_unit_decl_value_fields (struct data_in *data_in,
+unpack_ts_translation_unit_decl_value_fields (class data_in *data_in,
struct bitpack_d *bp, tree expr)
{
TRANSLATION_UNIT_LANGUAGE (expr) = xstrdup (bp_unpack_string (data_in, bp));
@@ -427,7 +427,7 @@ unpack_ts_translation_unit_decl_value_fields (struct data_in *data_in,
structure of expression EXPR from bitpack BP. */
static void
-unpack_ts_omp_clause_value_fields (struct data_in *data_in,
+unpack_ts_omp_clause_value_fields (class data_in *data_in,
struct bitpack_d *bp, tree expr)
{
stream_input_location (&OMP_CLAUSE_LOCATION (expr), bp, data_in);
@@ -473,8 +473,8 @@ unpack_ts_omp_clause_value_fields (struct data_in *data_in,
bitfield values that the writer may have written. */
void
-streamer_read_tree_bitfields (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+streamer_read_tree_bitfields (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
enum tree_code code;
struct bitpack_d bp;
@@ -570,7 +570,7 @@ streamer_read_tree_bitfields (struct lto_input_block *ib,
*IX_P the index into the reader cache where the new tree is stored. */
tree
-streamer_alloc_tree (struct lto_input_block *ib, struct data_in *data_in,
+streamer_alloc_tree (class lto_input_block *ib, class data_in *data_in,
enum LTO_tags tag)
{
enum tree_code code;
@@ -640,8 +640,8 @@ streamer_alloc_tree (struct lto_input_block *ib, struct data_in *data_in,
static void
-lto_input_ts_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
if (TREE_CODE (expr) != IDENTIFIER_NODE)
TREE_TYPE (expr) = stream_read_tree (ib, data_in);
@@ -653,8 +653,8 @@ lto_input_ts_common_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_vector_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_vector_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
unsigned int count = vector_cst_encoded_nelts (expr);
for (unsigned int i = 0; i < count; ++i)
@@ -667,8 +667,8 @@ lto_input_ts_vector_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_poly_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_poly_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
POLY_INT_CST_COEFF (expr, i) = stream_read_tree (ib, data_in);
@@ -680,8 +680,8 @@ lto_input_ts_poly_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_complex_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_complex_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
TREE_REALPART (expr) = stream_read_tree (ib, data_in);
TREE_IMAGPART (expr) = stream_read_tree (ib, data_in);
@@ -693,8 +693,8 @@ lto_input_ts_complex_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_decl_minimal_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_decl_minimal_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
DECL_NAME (expr) = stream_read_tree (ib, data_in);
DECL_CONTEXT (expr) = stream_read_tree (ib, data_in);
@@ -706,8 +706,8 @@ lto_input_ts_decl_minimal_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_decl_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_decl_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
DECL_SIZE (expr) = stream_read_tree (ib, data_in);
DECL_SIZE_UNIT (expr) = stream_read_tree (ib, data_in);
@@ -733,8 +733,8 @@ lto_input_ts_decl_common_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_decl_non_common_tree_pointers (struct lto_input_block *,
- struct data_in *, tree)
+lto_input_ts_decl_non_common_tree_pointers (class lto_input_block *,
+ class data_in *, tree)
{
}
@@ -744,8 +744,8 @@ lto_input_ts_decl_non_common_tree_pointers (struct lto_input_block *,
file being read. */
static void
-lto_input_ts_decl_with_vis_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_decl_with_vis_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
tree id;
@@ -763,8 +763,8 @@ lto_input_ts_decl_with_vis_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_field_decl_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_field_decl_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
DECL_FIELD_OFFSET (expr) = stream_read_tree (ib, data_in);
DECL_BIT_FIELD_TYPE (expr) = stream_read_tree (ib, data_in);
@@ -778,8 +778,8 @@ lto_input_ts_field_decl_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_function_decl_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_function_decl_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
/* DECL_STRUCT_FUNCTION is loaded on demand by cgraph_get_body. */
DECL_FUNCTION_PERSONALITY (expr) = stream_read_tree (ib, data_in);
@@ -816,8 +816,8 @@ lto_input_ts_function_decl_tree_pointers (struct lto_input_block *ib,
being read. */
static void
-lto_input_ts_type_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_type_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
TYPE_SIZE (expr) = stream_read_tree (ib, data_in);
TYPE_SIZE_UNIT (expr) = stream_read_tree (ib, data_in);
@@ -838,8 +838,8 @@ lto_input_ts_type_common_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_type_non_common_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in,
+lto_input_ts_type_non_common_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in,
tree expr)
{
if (TREE_CODE (expr) == ENUMERAL_TYPE)
@@ -863,8 +863,8 @@ lto_input_ts_type_non_common_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_list_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_list_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
TREE_PURPOSE (expr) = stream_read_tree (ib, data_in);
TREE_VALUE (expr) = stream_read_tree (ib, data_in);
@@ -877,8 +877,8 @@ lto_input_ts_list_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_vec_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_vec_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
int i;
@@ -895,8 +895,8 @@ lto_input_ts_vec_tree_pointers (struct lto_input_block *ib,
static void
-lto_input_ts_exp_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_exp_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
int i;
tree block;
@@ -921,8 +921,8 @@ lto_input_ts_exp_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_block_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_block_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
BLOCK_VARS (expr) = streamer_read_chain (ib, data_in);
@@ -967,8 +967,8 @@ lto_input_ts_block_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_binfo_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_binfo_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
tree t;
@@ -999,8 +999,8 @@ lto_input_ts_binfo_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_constructor_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_constructor_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
unsigned i;
@@ -1019,8 +1019,8 @@ lto_input_ts_constructor_tree_pointers (struct lto_input_block *ib,
file being read. */
static void
-lto_input_ts_omp_clause_tree_pointers (struct lto_input_block *ib,
- struct data_in *data_in, tree expr)
+lto_input_ts_omp_clause_tree_pointers (class lto_input_block *ib,
+ class data_in *data_in, tree expr)
{
int i;
@@ -1034,7 +1034,7 @@ lto_input_ts_omp_clause_tree_pointers (struct lto_input_block *ib,
contains tables and descriptors for the file being read. */
void
-streamer_read_tree_body (struct lto_input_block *ib, struct data_in *data_in,
+streamer_read_tree_body (class lto_input_block *ib, class data_in *data_in,
tree expr)
{
enum tree_code code;
@@ -1104,7 +1104,7 @@ streamer_read_tree_body (struct lto_input_block *ib, struct data_in *data_in,
DATA_IN->FILE_DATA->GLOBALS_INDEX[IX]. */
tree
-streamer_get_pickled_tree (struct lto_input_block *ib, struct data_in *data_in)
+streamer_get_pickled_tree (class lto_input_block *ib, class data_in *data_in)
{
unsigned HOST_WIDE_INT ix;
tree result;
diff --git a/gcc/tree-streamer.h b/gcc/tree-streamer.h
index 2972861731c..01ddd638e37 100644
--- a/gcc/tree-streamer.h
+++ b/gcc/tree-streamer.h
@@ -58,14 +58,14 @@ struct streamer_tree_cache_d
};
/* In tree-streamer-in.c. */
-tree streamer_read_string_cst (struct data_in *, struct lto_input_block *);
-tree streamer_read_chain (struct lto_input_block *, struct data_in *);
-tree streamer_alloc_tree (struct lto_input_block *, struct data_in *,
+tree streamer_read_string_cst (class data_in *, class lto_input_block *);
+tree streamer_read_chain (class lto_input_block *, class data_in *);
+tree streamer_alloc_tree (class lto_input_block *, class data_in *,
enum LTO_tags);
-void streamer_read_tree_body (struct lto_input_block *, struct data_in *, tree);
-tree streamer_get_pickled_tree (struct lto_input_block *, struct data_in *);
-void streamer_read_tree_bitfields (struct lto_input_block *,
- struct data_in *, tree);
+void streamer_read_tree_body (class lto_input_block *, class data_in *, tree);
+tree streamer_get_pickled_tree (class lto_input_block *, class data_in *);
+void streamer_read_tree_bitfields (class lto_input_block *,
+ class data_in *, tree);
/* In tree-streamer-out.c. */
void streamer_write_string_cst (struct output_block *,
@@ -117,7 +117,7 @@ static inline machine_mode
bp_unpack_machine_mode (struct bitpack_d *bp)
{
return (machine_mode)
- ((struct lto_input_block *)
+ ((class lto_input_block *)
bp->stream)->mode_table[bp_unpack_enum (bp, machine_mode, 1 << 8)];
}
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 5f8ed46f496..776db77f53a 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -1448,8 +1448,8 @@ bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
int
case_bit_test::cmp (const void *p1, const void *p2)
{
- const struct case_bit_test *const d1 = (const struct case_bit_test *) p1;
- const struct case_bit_test *const d2 = (const struct case_bit_test *) p2;
+ const case_bit_test *const d1 = (const case_bit_test *) p1;
+ const case_bit_test *const d2 = (const case_bit_test *) p2;
if (d2->bits != d1->bits)
return d2->bits - d1->bits;
@@ -1480,7 +1480,7 @@ void
bit_test_cluster::emit (tree index_expr, tree index_type,
tree, basic_block default_bb)
{
- struct case_bit_test test[m_max_case_bit_tests] = { {} };
+ case_bit_test test[m_max_case_bit_tests] = { {} };
unsigned int i, j, k;
unsigned int count;
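In the case_bit_test::cmp hunk just above, the elaborated-type-specifier is dropped entirely rather than switched to class: in C++ the struct/class key is optional in a cast once the type is in scope, so no key means no possible mismatch. A minimal, self-contained sketch of that comparator pattern (hypothetical stand-alone version, not the GCC sources):

/* case-bit-test-sketch.cc -- hypothetical illustration, not part of this patch.  */
#include <cstdlib>

struct case_bit_test { int bits; };

/* qsort-style comparator; the casts name the type without any class-key,
   mirroring the change in tree-switch-conversion.c.  */
static int
cmp (const void *p1, const void *p2)
{
  const case_bit_test *const d1 = static_cast<const case_bit_test *> (p1);
  const case_bit_test *const d2 = static_cast<const case_bit_test *> (p2);
  return d2->bits - d1->bits;   /* larger bit counts sort first */
}

int
main (void)
{
  case_bit_test t[2] = { {1}, {3} };
  std::qsort (t, 2, sizeof (case_bit_test), cmp);
  return t[0].bits == 3 ? 0 : 1;
}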
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 1af36d8ecbd..6390b1949b5 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -183,7 +183,7 @@ vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
static opt_result
vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
if ((unsigned) PARAM_VALUE (PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS) == 0)
return opt_result::failure_at (vect_location,
@@ -306,7 +306,7 @@ vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
loop_vec_info loop_vinfo,
int loop_depth, unsigned int *max_vf)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
lambda_vector dist_v;
unsigned int i;
FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
@@ -363,7 +363,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
unsigned int *max_vf)
{
unsigned int i;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dra = DDR_A (ddr);
struct data_reference *drb = DDR_B (ddr);
dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
@@ -867,7 +867,7 @@ void
vect_record_base_alignments (vec_info *vinfo)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
- struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
+ class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
data_reference *dr;
unsigned int i;
FOR_EACH_VEC_ELT (vinfo->shared->datarefs, i, dr)
@@ -914,7 +914,7 @@ vect_compute_data_ref_alignment (dr_vec_info *dr_info)
stmt_vec_info stmt_info = dr_info->stmt;
vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
tree ref = DR_REF (dr_info->dr);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
@@ -1659,7 +1659,7 @@ opt_result
vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
{
vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum dr_alignment_support supportable_dr_alignment;
dr_vec_info *first_store = NULL;
dr_vec_info *dr0_info = NULL;
@@ -1822,7 +1822,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
computation will be invariant in the outermost loop. */
else if (same_align_drs_max == same_align_drs)
{
- struct loop *ivloop0, *ivloop;
+ class loop *ivloop0, *ivloop;
ivloop0 = outermost_invariant_loop_for_expr
(loop, DR_BASE_ADDRESS (dr0_info->dr));
ivloop = outermost_invariant_loop_for_expr
@@ -2710,7 +2710,7 @@ vect_analyze_data_ref_access (dr_vec_info *dr_info)
tree scalar_type = TREE_TYPE (DR_REF (dr));
stmt_vec_info stmt_info = dr_info->stmt;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
return true;
@@ -3730,7 +3730,7 @@ vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
{
HOST_WIDE_INT scale = 1;
poly_int64 pbitpos, pbitsize;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
tree offtype = NULL_TREE;
tree decl = NULL_TREE, base, off;
@@ -4162,7 +4162,7 @@ vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
opt_result
vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf, bool *fatal)
{
- struct loop *loop = NULL;
+ class loop *loop = NULL;
unsigned int i;
struct data_reference *dr;
tree scalar_type;
@@ -4673,16 +4673,16 @@ vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info,
tree
vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
- struct loop *at_loop, tree offset,
+ class loop *at_loop, tree offset,
tree *initial_address, gimple_stmt_iterator *gsi,
gimple **ptr_incr, bool only_init,
tree byte_offset, tree iv_step)
{
const char *base_name;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
bool nested_in_vect_loop = false;
- struct loop *containing_loop = NULL;
+ class loop *containing_loop = NULL;
tree aggr_ptr_type;
tree aggr_ptr;
tree new_temp;
@@ -5427,13 +5427,13 @@ vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree *realignment_token,
enum dr_alignment_support alignment_support_scheme,
tree init_addr,
- struct loop **at_loop)
+ class loop **at_loop)
{
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
struct data_reference *dr = dr_info->dr;
- struct loop *loop = NULL;
+ class loop *loop = NULL;
edge pe = NULL;
tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
tree vec_dest;
@@ -5448,8 +5448,8 @@ vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
gimple_seq stmts = NULL;
bool compute_in_loop = false;
bool nested_in_vect_loop = false;
- struct loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
- struct loop *loop_for_initial_load = NULL;
+ class loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
+ class loop *loop_for_initial_load = NULL;
if (loop_vinfo)
{
@@ -6459,7 +6459,7 @@ vect_supportable_dr_alignment (dr_vec_info *dr_info,
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
machine_mode mode = TYPE_MODE (vectype);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *vect_loop = NULL;
+ class loop *vect_loop = NULL;
bool nested_in_vect_loop = false;
if (aligned_access_p (dr_info) && !check_aligned_accesses)
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index bd8fffb1704..5c25441c70a 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -89,8 +89,8 @@ rename_variables_in_bb (basic_block bb, bool rename_from_outer_loop)
ssa_op_iter iter;
edge e;
edge_iterator ei;
- struct loop *loop = bb->loop_father;
- struct loop *outer_loop = NULL;
+ class loop *loop = bb->loop_father;
+ class loop *outer_loop = NULL;
if (rename_from_outer_loop)
{
@@ -258,7 +258,7 @@ adjust_phi_and_debug_stmts (gimple *update_phi, edge e, tree new_def)
value that it should have on subsequent iterations. */
static void
-vect_set_loop_mask (struct loop *loop, tree mask, tree init_mask,
+vect_set_loop_mask (class loop *loop, tree mask, tree init_mask,
tree next_mask)
{
gphi *phi = create_phi_node (mask, loop->header);
@@ -269,7 +269,7 @@ vect_set_loop_mask (struct loop *loop, tree mask, tree init_mask,
/* Add SEQ to the end of LOOP's preheader block. */
static void
-add_preheader_seq (struct loop *loop, gimple_seq seq)
+add_preheader_seq (class loop *loop, gimple_seq seq)
{
if (seq)
{
@@ -282,7 +282,7 @@ add_preheader_seq (struct loop *loop, gimple_seq seq)
/* Add SEQ to the beginning of LOOP's header block. */
static void
-add_header_seq (struct loop *loop, gimple_seq seq)
+add_header_seq (class loop *loop, gimple_seq seq)
{
if (seq)
{
@@ -406,7 +406,7 @@ vect_maybe_permute_loop_masks (gimple_seq *seq, rgroup_masks *dest_rgm,
would ever hit a value that produces a set of all-false masks for RGM. */
static tree
-vect_set_loop_masks_directly (struct loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_masks_directly (class loop *loop, loop_vec_info loop_vinfo,
gimple_seq *preheader_seq,
gimple_stmt_iterator loop_cond_gsi,
rgroup_masks *rgm, tree niters, tree niters_skip,
@@ -635,7 +635,7 @@ vect_set_loop_masks_directly (struct loop *loop, loop_vec_info loop_vinfo,
final gcond. */
static gcond *
-vect_set_loop_condition_masked (struct loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_condition_masked (class loop *loop, loop_vec_info loop_vinfo,
tree niters, tree final_iv,
bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
@@ -743,7 +743,7 @@ vect_set_loop_condition_masked (struct loop *loop, loop_vec_info loop_vinfo,
are no loop masks. */
static gcond *
-vect_set_loop_condition_unmasked (struct loop *loop, tree niters,
+vect_set_loop_condition_unmasked (class loop *loop, tree niters,
tree step, tree final_iv,
bool niters_maybe_zero,
gimple_stmt_iterator loop_cond_gsi)
@@ -896,7 +896,7 @@ vect_set_loop_condition_unmasked (struct loop *loop, tree niters,
Assumption: the exit-condition of LOOP is the last stmt in the loop. */
void
-vect_set_loop_condition (struct loop *loop, loop_vec_info loop_vinfo,
+vect_set_loop_condition (class loop *loop, loop_vec_info loop_vinfo,
tree niters, tree step, tree final_iv,
bool niters_maybe_zero)
{
@@ -985,11 +985,11 @@ slpeel_duplicate_current_defs_from_edges (edge from, edge to)
basic blocks from SCALAR_LOOP instead of LOOP, but to either the
entry or exit of LOOP. */
-struct loop *
-slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop,
- struct loop *scalar_loop, edge e)
+class loop *
+slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop,
+ class loop *scalar_loop, edge e)
{
- struct loop *new_loop;
+ class loop *new_loop;
basic_block *new_bbs, *bbs, *pbbs;
bool at_exit;
bool was_imm_dom;
@@ -1214,7 +1214,7 @@ slpeel_add_loop_guard (basic_block guard_bb, tree cond,
*/
bool
-slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
+slpeel_can_duplicate_loop_p (const class loop *loop, const_edge e)
{
edge exit_e = single_exit (loop);
edge entry_e = loop_preheader_edge (loop);
@@ -1244,7 +1244,7 @@ slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
uses should be renamed. */
static void
-create_lcssa_for_virtual_phi (struct loop *loop)
+create_lcssa_for_virtual_phi (class loop *loop)
{
gphi_iterator gsi;
edge exit_e = single_exit (loop);
@@ -1289,7 +1289,7 @@ create_lcssa_for_virtual_phi (struct loop *loop)
Return the loop location if succeed and NULL if not. */
dump_user_location_t
-find_loop_location (struct loop *loop)
+find_loop_location (class loop *loop)
{
gimple *stmt = NULL;
basic_block bb;
@@ -1351,7 +1351,7 @@ iv_phi_p (stmt_vec_info stmt_info)
bool
vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block bb = loop->header;
gphi_iterator gsi;
@@ -1465,7 +1465,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
tree niters, edge update_e)
{
gphi_iterator gsi, gsi1;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block update_bb = update_e->dest;
basic_block exit_bb = single_exit (loop)->dest;
@@ -1991,7 +1991,7 @@ vect_gen_vector_loop_niters_mult_vf (loop_vec_info loop_vinfo,
{
/* We should be using a step_vector of VF if VF is variable. */
int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo).to_constant ();
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree type = TREE_TYPE (niters_vector);
tree log_vf = build_int_cst (type, exact_log2 (vf));
basic_block exit_bb = single_exit (loop)->dest;
@@ -2059,11 +2059,11 @@ vect_gen_vector_loop_niters_mult_vf (loop_vec_info loop_vinfo,
static void
slpeel_update_phi_nodes_for_loops (loop_vec_info loop_vinfo,
- struct loop *first, struct loop *second,
+ class loop *first, class loop *second,
bool create_lcssa_for_iv_phis)
{
gphi_iterator gsi_update, gsi_orig;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
edge first_latch_e = EDGE_SUCC (first->latch, 0);
edge second_preheader_e = loop_preheader_edge (second);
@@ -2147,8 +2147,8 @@ slpeel_update_phi_nodes_for_loops (loop_vec_info loop_vinfo,
in the update_loop's PHI node with the result of new PHI result. */
static void
-slpeel_update_phi_nodes_for_guard1 (struct loop *skip_loop,
- struct loop *update_loop,
+slpeel_update_phi_nodes_for_guard1 (class loop *skip_loop,
+ class loop *update_loop,
edge guard_edge, edge merge_edge)
{
location_t merge_loc, guard_loc;
@@ -2188,7 +2188,7 @@ slpeel_update_phi_nodes_for_guard1 (struct loop *skip_loop,
NULL. */
static tree
-find_guard_arg (struct loop *loop, struct loop *epilog ATTRIBUTE_UNUSED,
+find_guard_arg (class loop *loop, class loop *epilog ATTRIBUTE_UNUSED,
gphi *lcssa_phi)
{
gphi_iterator gsi;
@@ -2259,7 +2259,7 @@ find_guard_arg (struct loop *loop, struct loop *epilog ATTRIBUTE_UNUSED,
in exit_bb will also be updated. */
static void
-slpeel_update_phi_nodes_for_guard2 (struct loop *loop, struct loop *epilog,
+slpeel_update_phi_nodes_for_guard2 (class loop *loop, class loop *epilog,
edge guard_edge, edge merge_edge)
{
gphi_iterator gsi;
@@ -2308,7 +2308,7 @@ slpeel_update_phi_nodes_for_guard2 (struct loop *loop, struct loop *epilog,
the arg of its loop closed ssa PHI needs to be updated. */
static void
-slpeel_update_phi_nodes_for_lcssa (struct loop *epilog)
+slpeel_update_phi_nodes_for_lcssa (class loop *epilog)
{
gphi_iterator gsi;
basic_block exit_bb = single_exit (epilog)->dest;
@@ -2397,7 +2397,7 @@ slpeel_update_phi_nodes_for_lcssa (struct loop *epilog)
versioning conditions if loop versioning is needed. */
-struct loop *
+class loop *
vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
tree *niters_vector, tree *step_vector,
tree *niters_vector_mult_vf_var, int th,
@@ -2439,8 +2439,8 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
prob_prolog = prob_epilog = profile_probability::guessed_always ()
.apply_scale (estimated_vf - 1, estimated_vf);
- struct loop *prolog, *epilog = NULL, *loop = LOOP_VINFO_LOOP (loop_vinfo);
- struct loop *first_loop = loop;
+ class loop *prolog, *epilog = NULL, *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *first_loop = loop;
bool irred_flag = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP;
create_lcssa_for_virtual_phi (loop);
update_ssa (TODO_update_ssa_only_virtuals);
@@ -2503,7 +2503,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1,
}
dump_user_location_t loop_loc = find_loop_location (loop);
- struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ class loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
if (prolog_peeling)
{
e = loop_preheader_edge (loop);
@@ -2965,13 +2965,13 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, tree * cond_expr)
The versioning precondition(s) are placed in *COND_EXPR and
*COND_EXPR_STMT_LIST. */
-struct loop *
+class loop *
vect_loop_versioning (loop_vec_info loop_vinfo,
unsigned int th, bool check_profitability,
poly_uint64 versioning_threshold)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *nloop;
- struct loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *nloop;
+ class loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
basic_block condition_bb;
gphi_iterator gsi;
gimple_stmt_iterator cond_exp_gsi;
@@ -3058,7 +3058,7 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
/* Compute the outermost loop cond_expr and cond_expr_stmt_list are
invariant in. */
- struct loop *outermost = outermost_invariant_loop_for_expr (loop, cond_expr);
+ class loop *outermost = outermost_invariant_loop_for_expr (loop, cond_expr);
for (gimple_stmt_iterator gsi = gsi_start (cond_expr_stmt_list);
!gsi_end_p (gsi); gsi_next (&gsi))
{
@@ -3075,7 +3075,7 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
/* Search for the outermost loop we can version. Avoid versioning of
non-perfect nests but allow if-conversion versioned loops inside. */
- struct loop *loop_to_version = loop;
+ class loop *loop_to_version = loop;
if (flow_loop_nested_p (outermost, loop))
{
if (dump_enabled_p ())
@@ -3148,7 +3148,7 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
/* Kill off IFN_LOOP_VECTORIZED_CALL in the copy, nobody will
reap those otherwise; they also refer to the original
loops. */
- struct loop *l = loop;
+ class loop *l = loop;
while (gimple *call = vect_loop_vectorized_call (l))
{
call = SSA_NAME_DEF_STMT (get_current_def (gimple_call_lhs (call)));
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index b49ab152012..b0cbbac0cb5 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -286,7 +286,7 @@ vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf,
static opt_result
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned nbbs = loop->num_nodes;
poly_uint64 vectorization_factor = 1;
@@ -481,7 +481,7 @@ vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
enclosing LOOP). */
static void
-vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
+vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop)
{
basic_block bb = loop->header;
tree init, step;
@@ -633,7 +633,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
@@ -714,11 +714,11 @@ vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
static gcond *
-vect_get_loop_niters (struct loop *loop, tree *assumptions,
+vect_get_loop_niters (class loop *loop, tree *assumptions,
tree *number_of_iterations, tree *number_of_iterationsm1)
{
edge exit = single_exit (loop);
- struct tree_niter_desc niter_desc;
+ class tree_niter_desc niter_desc;
tree niter_assumptions, niter, may_be_zero;
gcond *cond = get_loop_exit_condition (loop);
@@ -793,7 +793,7 @@ vect_get_loop_niters (struct loop *loop, tree *assumptions,
static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
- const struct loop *const loop = (const struct loop *)data;
+ const class loop *const loop = (const class loop *)data;
if (flow_bb_inside_loop_p (loop, bb))
return true;
return false;
@@ -803,7 +803,7 @@ bb_in_loop_p (const_basic_block bb, const void *data)
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
stmt_vec_info structs for all the stmts in LOOP_IN. */
-_loop_vec_info::_loop_vec_info (struct loop *loop_in, vec_info_shared *shared)
+_loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared)
: vec_info (vec_info::loop, init_cost (loop_in), shared),
loop (loop_in),
bbs (XCNEWVEC (basic_block, loop->num_nodes)),
@@ -1029,7 +1029,7 @@ vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int min_ni_width;
unsigned int max_nscalars_per_iter
= vect_get_max_nscalars_per_iter (loop_vinfo);
@@ -1122,7 +1122,7 @@ vect_verify_full_masking (loop_vec_info loop_vinfo)
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes, factor;
int innerloop_iters, i;
@@ -1204,7 +1204,7 @@ vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
niter could be analyzed under some assumptions. */
opt_result
-vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
+vect_analyze_loop_form_1 (class loop *loop, gcond **loop_cond,
tree *assumptions, tree *number_of_iterationsm1,
tree *number_of_iterations, gcond **inner_loop_cond)
{
@@ -1239,7 +1239,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
}
else
{
- struct loop *innerloop = loop->inner;
+ class loop *innerloop = loop->inner;
edge entryedge;
/* Nested loop. We currently require that the loop is doubly-nested,
@@ -1356,7 +1356,7 @@ vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
opt_loop_vec_info
-vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
+vect_analyze_loop_form (class loop *loop, vec_info_shared *shared)
{
tree assumptions, number_of_iterations, number_of_iterationsm1;
gcond *loop_cond, *inner_loop_cond = NULL;
@@ -1419,7 +1419,7 @@ vect_analyze_loop_form (struct loop *loop, vec_info_shared *shared)
static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
poly_uint64 vectorization_factor;
@@ -1515,7 +1515,7 @@ vect_active_double_reduction_p (stmt_vec_info stmt_info)
static opt_result
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
@@ -1661,7 +1661,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
/* Only fully-masked loops can have iteration counts less than the
@@ -2328,7 +2328,7 @@ again:
loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL epilogue must
be vectorized. */
opt_loop_vec_info
-vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo,
+vect_analyze_loop (class loop *loop, loop_vec_info orig_loop_vinfo,
vec_info_shared *shared)
{
auto_vector_sizes vector_sizes;
@@ -2532,7 +2532,7 @@ neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
stmt_vec_info stmt_vinfo = stmts[0];
tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
tree scalar_type = TREE_TYPE (vector_type);
- struct loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
+ class loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
gcc_assert (loop);
switch (code)
@@ -2611,8 +2611,8 @@ static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
gimple *first_stmt)
{
- struct loop *loop = (gimple_bb (phi))->loop_father;
- struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
+ class loop *loop = (gimple_bb (phi))->loop_father;
+ class loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
enum tree_code code;
gimple *loop_use_stmt = NULL;
stmt_vec_info use_stmt_info;
@@ -2951,8 +2951,8 @@ vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
enum vect_reduction_type *v_reduc_type)
{
gphi *phi = as_a <gphi *> (phi_info->stmt);
- struct loop *loop = (gimple_bb (phi))->loop_father;
- struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
+ class loop *loop = (gimple_bb (phi))->loop_father;
+ class loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
gimple *phi_use_stmt = NULL;
enum tree_code orig_code, code;
@@ -3977,7 +3977,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
tree vectype;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -4208,7 +4208,7 @@ get_initial_def_for_reduction (stmt_vec_info stmt_vinfo, tree init_val,
tree *adjustment_def)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (init_val);
tree vectype = get_vectype_for_scalar_type (scalar_type);
enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
@@ -4329,7 +4329,7 @@ get_initial_defs_for_reduction (slp_tree slp_node,
tree vector_type;
unsigned int group_size = stmts.length ();
unsigned int i;
- struct loop *loop;
+ class loop *loop;
vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
@@ -4517,7 +4517,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs,
tree vectype;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
basic_block exit_bb;
tree scalar_dest;
tree scalar_type;
@@ -5955,7 +5955,7 @@ vectorize_fold_left_reduction (stmt_vec_info stmt_info,
int reduc_index, vec_loop_masks *masks)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
stmt_vec_info new_stmt_info = NULL;
internal_fn mask_reduc_fn = get_masked_reduction_fn (reduc_fn, vectype_in);
@@ -6098,7 +6098,7 @@ vectorize_fold_left_reduction (stmt_vec_info stmt_info,
does not cause overflow. */
static bool
-is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, struct loop *loop)
+is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, class loop *loop)
{
gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
@@ -6256,7 +6256,7 @@ vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
tree vectype_in = NULL_TREE;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, orig_code;
internal_fn reduc_fn;
machine_mode vec_mode;
@@ -6280,7 +6280,7 @@ vectorizable_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
bool nested_cycle = false, found_nested_cycle_def = false;
bool double_reduc = false;
basic_block def_bb;
- struct loop * def_stmt_loop;
+ class loop * def_stmt_loop;
tree def_arg;
auto_vec<tree> vec_oprnds0;
auto_vec<tree> vec_oprnds1;
@@ -7468,10 +7468,10 @@ vectorizable_induction (stmt_vec_info stmt_info,
stmt_vector_for_cost *cost_vec)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned ncopies;
bool nested_in_vect_loop = false;
- struct loop *iv_loop;
+ class loop *iv_loop;
tree vec_def;
edge pe = loop_preheader_edge (loop);
basic_block new_bb;
@@ -8012,7 +8012,7 @@ vectorizable_live_operation (stmt_vec_info stmt_info,
stmt_vector_for_cost *)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
imm_use_iterator imm_iter;
tree lhs, lhs_type, bitsize, vec_bitsize;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
@@ -8220,7 +8220,7 @@ vectorizable_live_operation (stmt_vec_info stmt_info,
/* Kill any debug uses outside LOOP of SSA names defined in STMT_INFO. */
static void
-vect_loop_kill_debug_uses (struct loop *loop, stmt_vec_info stmt_info)
+vect_loop_kill_debug_uses (class loop *loop, stmt_vec_info stmt_info)
{
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
@@ -8276,7 +8276,7 @@ loop_niters_no_overflow (loop_vec_info loop_vinfo)
}
widest_int max;
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
/* Check the upper bound of loop niters. */
if (get_max_loop_iterations (loop, &max))
{
@@ -8384,7 +8384,7 @@ vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
by factor VF. */
static void
-scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
+scale_profile_for_vect_loop (class loop *loop, unsigned vf)
{
edge preheader = loop_preheader_edge (loop);
/* Reduce loop iterations by the vectorization factor. */
@@ -8422,7 +8422,7 @@ static void
vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
gimple_stmt_iterator *gsi, stmt_vec_info *seen_store)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
if (dump_enabled_p ())
@@ -8467,11 +8467,11 @@ vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
stmts in the loop, and update the loop exit condition.
Returns scalar epilogue loop if any. */
-struct loop *
+class loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- struct loop *epilogue = NULL;
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *epilogue = NULL;
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
@@ -8528,7 +8528,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
versioning_threshold);
check_profitability = false;
}
- struct loop *sloop
+ class loop *sloop
= vect_loop_versioning (loop_vinfo, th, check_profitability,
versioning_threshold);
sloop->force_vectorize = false;
@@ -8894,13 +8894,13 @@ vect_transform_loop (loop_vec_info loop_vinfo)
*/
void
-optimize_mask_stores (struct loop *loop)
+optimize_mask_stores (class loop *loop)
{
basic_block *bbs = get_loop_body (loop);
unsigned nbbs = loop->num_nodes;
unsigned i;
basic_block bb;
- struct loop *bb_loop;
+ class loop *bb_loop;
gimple_stmt_iterator gsi;
gimple *stmt;
auto_vec<gimple *> worklist;
@@ -9089,7 +9089,7 @@ widest_int
vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo)
{
tree niters_skip = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned HOST_WIDE_INT max_vf = vect_max_vf (loop_vinfo);
/* Calculate the value that the induction variable must be able
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 0157ba4992e..8430c98acc6 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -859,7 +859,7 @@ vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code,
/* We don't allow changing the order of the computation in the inner-loop
when doing outer-loop vectorization. */
- struct loop *loop = LOOP_VINFO_LOOP (loop_info);
+ class loop *loop = LOOP_VINFO_LOOP (loop_info);
if (loop && nested_in_vect_loop_p (loop, stmt_info))
return false;
@@ -4664,7 +4664,7 @@ vect_determine_precisions (vec_info *vinfo)
if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned int nbbs = loop->num_nodes;
@@ -4954,7 +4954,7 @@ vect_pattern_recog_1 (vect_recog_func *recog_func, stmt_vec_info stmt_info)
void
vect_pattern_recog (vec_info *vinfo)
{
- struct loop *loop;
+ class loop *loop;
basic_block *bbs;
unsigned int nbbs;
gimple_stmt_iterator si;
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 1aaf10eee2c..d172c3a75ae 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -2019,7 +2019,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
else
{
/* Create a new SLP instance. */
- new_instance = XNEW (struct _slp_instance);
+ new_instance = XNEW (class _slp_instance);
SLP_INSTANCE_TREE (new_instance) = node;
SLP_INSTANCE_GROUP_SIZE (new_instance) = group_size;
SLP_INSTANCE_UNROLLING_FACTOR (new_instance) = unrolling_factor;
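The tree-vect-slp.c hunk keeps the allocation as-is and only changes the key written inside the macro argument. That spelling makes no semantic difference: an elaborated-type-specifier names the same type whether it says struct or class, and here it only ends up inside a sizeof and a cast. A minimal sketch with a hypothetical stand-in macro (not GCC's actual XNEW) shows the equivalence:

/* xnew-sketch.cc -- hypothetical illustration, not part of this patch.  */
#include <cstdlib>

/* Typed-allocation macro in the spirit of XNEW: cast of malloc to T*.  */
#define XNEW_SKETCH(T) ((T *) std::malloc (sizeof (T)))

class _slp_instance_sketch { public: int group_size; };

int
main (void)
{
  /* 'class', 'struct', or no key at all before the type name behaves the
     same inside the sizeof and the cast; only tag-mismatch diagnostics
     care which one is written.  */
  _slp_instance_sketch *p = XNEW_SKETCH (class _slp_instance_sketch);
  if (!p)
    return 1;
  p->group_size = 0;
  std::free (p);
  return 0;
}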
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 10ce2a22f85..e921225b5ec 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -62,7 +62,7 @@ along with GCC; see the file COPYING3. If not see
/* Return the vectorized type for the given statement. */
tree
-stmt_vectype (struct _stmt_vec_info *stmt_info)
+stmt_vectype (class _stmt_vec_info *stmt_info)
{
return STMT_VINFO_VECTYPE (stmt_info);
}
@@ -70,12 +70,12 @@ stmt_vectype (struct _stmt_vec_info *stmt_info)
/* Return TRUE iff the given statement is in an inner loop relative to
the loop being vectorized. */
bool
-stmt_in_inner_loop_p (struct _stmt_vec_info *stmt_info)
+stmt_in_inner_loop_p (class _stmt_vec_info *stmt_info)
{
gimple *stmt = STMT_VINFO_STMT (stmt_info);
basic_block bb = gimple_bb (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop* loop;
+ class loop* loop;
if (!loop_vinfo)
return false;
@@ -297,7 +297,7 @@ static bool
vect_stmt_relevant_p (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
enum vect_relevant *relevant, bool *live_p)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
use_operand_p use_p;
@@ -610,7 +610,7 @@ process_use (stmt_vec_info stmt_vinfo, tree use, loop_vec_info loop_vinfo,
opt_result
vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo, bool *fatal)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned int nbbs = loop->num_nodes;
gimple_stmt_iterator si;
@@ -1415,7 +1415,7 @@ vect_init_vector_1 (stmt_vec_info stmt_vinfo, gimple *new_stmt,
if (loop_vinfo)
{
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block new_bb;
edge pe;
@@ -2041,7 +2041,7 @@ vect_truncate_gather_scatter_offset (stmt_vec_info stmt_info,
unsigned HOST_WIDE_INT count = vect_max_vf (loop_vinfo) - 1;
/* Try lowering COUNT to the number of scalar latch iterations. */
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
widest_int max_iters;
if (max_loop_iterations (loop, &max_iters)
&& max_iters < count)
@@ -2209,7 +2209,7 @@ get_group_load_store_type (stmt_vec_info stmt_info, tree vectype, bool slp,
{
vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
+ class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
@@ -2727,7 +2727,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info,
tree mask)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies = vect_get_num_copies (loop_vinfo, vectype);
@@ -2969,7 +2969,7 @@ vect_build_gather_load_calls (stmt_vec_info stmt_info,
containing loop. */
static void
-vect_get_gather_scatter_ops (struct loop *loop, stmt_vec_info stmt_info,
+vect_get_gather_scatter_ops (class loop *loop, stmt_vec_info stmt_info,
gather_scatter_info *gs_info,
tree *dataref_ptr, tree *vec_offset)
{
@@ -3004,7 +3004,7 @@ vect_get_strided_load_store_ops (stmt_vec_info stmt_info,
tree *dataref_bump, tree *vec_offset)
{
struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
- struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
gimple_seq stmts;
@@ -3759,7 +3759,7 @@ struct simd_call_arg_info
*ARGINFO. */
static void
-vect_simd_lane_linear (tree op, struct loop *loop,
+vect_simd_lane_linear (tree op, class loop *loop,
struct simd_call_arg_info *arginfo)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (op);
@@ -3856,7 +3856,7 @@ vectorizable_simd_clone_call (stmt_vec_info stmt_info,
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
vec_info *vinfo = stmt_info->vinfo;
- struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
+ class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
tree fndecl, new_temp;
int ncopies, j;
auto_vec<simd_call_arg_info> arginfo;
@@ -7200,7 +7200,7 @@ vectorizable_store (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree vec_oprnd = NULL_TREE;
tree elem_type;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
+ class loop *loop = NULL;
machine_mode vec_mode;
tree dummy;
enum dr_alignment_support alignment_support_scheme;
@@ -8349,7 +8349,7 @@ permute_vec_elements (tree x, tree y, tree mask_vec, stmt_vec_info stmt_info,
otherwise returns false. */
static bool
-hoist_defs_of_uses (stmt_vec_info stmt_info, struct loop *loop)
+hoist_defs_of_uses (stmt_vec_info stmt_info, class loop *loop)
{
ssa_op_iter i;
tree op;
@@ -8417,8 +8417,8 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
tree data_ref = NULL;
stmt_vec_info prev_stmt_info;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *loop = NULL;
- struct loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
+ class loop *loop = NULL;
+ class loop *containing_loop = gimple_bb (stmt_info->stmt)->loop_father;
bool nested_in_vect_loop = false;
tree elem_type;
tree new_temp;
@@ -8442,7 +8442,7 @@ vectorizable_load (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
stmt_vec_info first_stmt_info;
stmt_vec_info first_stmt_info_for_drptr = NULL;
bool compute_in_loop = false;
- struct loop *at_loop;
+ class loop *at_loop;
int vec_num;
bool slp = (slp_node != NULL);
bool slp_perm = false;
@@ -10811,7 +10811,7 @@ vect_transform_stmt (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
|| STMT_VINFO_RELEVANT (stmt_info) ==
vect_used_in_outer_by_reduction))
{
- struct loop *innerloop = LOOP_VINFO_LOOP (
+ class loop *innerloop = LOOP_VINFO_LOOP (
STMT_VINFO_LOOP_VINFO (stmt_info))->inner;
imm_use_iterator imm_iter;
use_operand_p use_p;
@@ -11203,7 +11203,7 @@ supportable_widening_operation (enum tree_code code, stmt_vec_info stmt_info,
vec<tree> *interm_types)
{
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
- struct loop *vect_loop = NULL;
+ class loop *vect_loop = NULL;
machine_mode vec_mode;
enum insn_code icode1, icode2;
optab optab1, optab2;
diff --git a/gcc/tree-vectorizer.c b/gcc/tree-vectorizer.c
index 868dc5c675c..173e6b51652 100644
--- a/gcc/tree-vectorizer.c
+++ b/gcc/tree-vectorizer.c
@@ -633,7 +633,7 @@ vec_info::replace_stmt (gimple_stmt_iterator *gsi, stmt_vec_info stmt_info,
stmt_vec_info
vec_info::new_stmt_vec_info (gimple *stmt)
{
- stmt_vec_info res = XCNEW (struct _stmt_vec_info);
+ stmt_vec_info res = XCNEW (class _stmt_vec_info);
res->vinfo = this;
res->stmt = stmt;
@@ -714,7 +714,7 @@ vec_info::free_stmt_vec_info (stmt_vec_info stmt_info)
clear loop constraint LOOP_C_FINITE. */
void
-vect_free_loop_info_assumptions (struct loop *loop)
+vect_free_loop_info_assumptions (class loop *loop)
{
scev_reset_htab ();
/* We need to explicitly reset upper bound information since they are
@@ -729,7 +729,7 @@ vect_free_loop_info_assumptions (struct loop *loop)
guarding it. */
gimple *
-vect_loop_vectorized_call (struct loop *loop, gcond **cond)
+vect_loop_vectorized_call (class loop *loop, gcond **cond)
{
basic_block bb = loop_preheader_edge (loop)->src;
gimple *g;
@@ -765,11 +765,11 @@ vect_loop_vectorized_call (struct loop *loop, gcond **cond)
internal call. */
static gimple *
-vect_loop_dist_alias_call (struct loop *loop)
+vect_loop_dist_alias_call (class loop *loop)
{
basic_block bb;
basic_block entry;
- struct loop *outer, *orig;
+ class loop *outer, *orig;
gimple_stmt_iterator gsi;
gimple *g;
@@ -824,7 +824,7 @@ set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
tree arg = gimple_call_arg (loop_vectorized_call, 1);
basic_block *bbs;
unsigned int i;
- struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
+ class loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));
LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
gcc_checking_assert (vect_loop_vectorized_call (scalar_loop)
@@ -1047,7 +1047,7 @@ vectorize_loops (void)
unsigned int i;
unsigned int num_vectorized_loops = 0;
unsigned int vect_loops_num;
- struct loop *loop;
+ class loop *loop;
hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
bool any_ifcvt_loops = false;
@@ -1098,7 +1098,7 @@ vectorize_loops (void)
&& vect_loop_vectorized_call (loop->inner))
{
tree arg = gimple_call_arg (loop_vectorized_call, 0);
- struct loop *vector_loop
+ class loop *vector_loop
= get_loop (cfun, tree_to_shwi (arg));
if (vector_loop && vector_loop != loop)
{
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 6e76b7eaab5..1456cde4c2c 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -21,7 +21,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H
-typedef struct _stmt_vec_info *stmt_vec_info;
+typedef class _stmt_vec_info *stmt_vec_info;
#include "tree-data-ref.h"
#include "tree-hash-traits.h"
@@ -227,7 +227,7 @@ public:
stmt_vec_info lookup_stmt (gimple *);
stmt_vec_info lookup_def (tree);
stmt_vec_info lookup_single_use (tree);
- struct dr_vec_info *lookup_dr (data_reference *);
+ class dr_vec_info *lookup_dr (data_reference *);
void move_dr (stmt_vec_info, stmt_vec_info);
void remove_stmt (stmt_vec_info);
void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);
@@ -262,8 +262,8 @@ private:
void free_stmt_vec_info (stmt_vec_info);
};
-struct _loop_vec_info;
-struct _bb_vec_info;
+class _loop_vec_info;
+class _bb_vec_info;
template<>
template<>
@@ -383,11 +383,11 @@ typedef auto_vec<rgroup_masks> vec_loop_masks;
/*-----------------------------------------------------------------*/
typedef class _loop_vec_info : public vec_info {
public:
- _loop_vec_info (struct loop *, vec_info_shared *);
+ _loop_vec_info (class loop *, vec_info_shared *);
~_loop_vec_info ();
/* The loop to which this info struct refers to. */
- struct loop *loop;
+ class loop *loop;
/* The loop basic blocks. */
basic_block *bbs;
@@ -445,7 +445,7 @@ public:
tree iv_type;
/* Unknown DRs according to which loop was peeled. */
- struct dr_vec_info *unaligned_dr;
+ class dr_vec_info *unaligned_dr;
/* peeling_for_alignment indicates whether peeling for alignment will take
place, and what the peeling factor should be:
@@ -558,7 +558,7 @@ public:
/* If if-conversion versioned this loop before conversion, this is the
loop version without if-conversion. */
- struct loop *scalar_loop;
+ class loop *scalar_loop;
/* For loops being epilogues of already vectorized loops
this points to the original vectorized loop. Otherwise NULL. */
@@ -650,7 +650,7 @@ public:
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
static inline loop_vec_info
-loop_vec_info_for_loop (struct loop *loop)
+loop_vec_info_for_loop (class loop *loop)
{
return (loop_vec_info) loop->aux;
}
@@ -1112,7 +1112,7 @@ STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
&& TYPE_UNSIGNED (TYPE)))
static inline bool
-nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
+nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
{
return (loop->inner
&& (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
@@ -1206,7 +1206,7 @@ int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
/* Alias targetm.vectorize.init_cost. */
static inline void *
-init_cost (struct loop *loop_info)
+init_cost (class loop *loop_info)
{
return targetm.vectorize.init_cost (loop_info);
}
@@ -1475,17 +1475,17 @@ class auto_purge_vect_location
/* Simple loop peeling and versioning utilities for vectorizer's purposes -
in tree-vect-loop-manip.c. */
-extern void vect_set_loop_condition (struct loop *, loop_vec_info,
+extern void vect_set_loop_condition (class loop *, loop_vec_info,
tree, tree, tree, bool);
-extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
-struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
- struct loop *, edge);
-struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
+extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge);
+class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *,
+ class loop *, edge);
+class loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
poly_uint64);
-extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
+extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
-extern dump_user_location_t find_loop_location (struct loop *);
+extern dump_user_location_t find_loop_location (class loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);
/* In tree-vect-stmts.c. */
@@ -1544,7 +1544,7 @@ extern void vect_get_store_cost (stmt_vec_info, int,
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
-extern void optimize_mask_stores (struct loop*);
+extern void optimize_mask_stores (class loop*);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
@@ -1573,7 +1573,7 @@ extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *);
extern void vect_record_base_alignments (vec_info *);
-extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *, tree,
+extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, class loop *, tree,
tree *, gimple_stmt_iterator *,
gimple **, bool,
tree = NULL_TREE, tree = NULL_TREE);
@@ -1589,7 +1589,7 @@ extern void vect_permute_store_chain (vec<tree> ,unsigned int, stmt_vec_info,
gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
tree *, enum dr_alignment_support, tree,
- struct loop **);
+ class loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree> , int,
gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
@@ -1608,7 +1608,7 @@ extern widest_int vect_iv_limit_for_full_masking (loop_vec_info loop_vinfo);
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
enum tree_code);
/* Drive for loop analysis stage. */
-extern opt_loop_vec_info vect_analyze_loop (struct loop *,
+extern opt_loop_vec_info vect_analyze_loop (class loop *,
loop_vec_info,
vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
@@ -1622,8 +1622,8 @@ extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
unsigned int, tree, unsigned int);
/* Drive for loop transformation stage. */
-extern struct loop *vect_transform_loop (loop_vec_info);
-extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
+extern class loop *vect_transform_loop (loop_vec_info);
+extern opt_loop_vec_info vect_analyze_loop_form (class loop *,
vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info, gimple_stmt_iterator *,
slp_tree, int, stmt_vec_info *,
@@ -1671,8 +1671,8 @@ void vect_pattern_recog (vec_info *);
/* In tree-vectorizer.c. */
unsigned vectorize_loops (void);
-void vect_free_loop_info_assumptions (struct loop *);
-gimple *vect_loop_vectorized_call (struct loop *, gcond **cond = NULL);
+void vect_free_loop_info_assumptions (class loop *);
+gimple *vect_loop_vectorized_call (class loop *, gcond **cond = NULL);
#endif /* GCC_TREE_VECTORIZER_H */
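
(Editorial aside, not part of the patch: the tree-vectorizer.h hunks above change declarations such as "typedef struct _stmt_vec_info *stmt_vec_info;" so that the class-key matches the key used at the type's definition. A minimal, self-contained C++ sketch of the pattern being normalized follows; the type and function names are invented for illustration and do not exist in GCC.)

/* Illustrative only -- 'loop_like' and 'node_count' are hypothetical names.  */

class loop_like			/* defined with the 'class' key */
{
public:
  int num_nodes;
};

struct loop_like;		/* redeclared with 'struct': C++ treats the two
				   keys as naming the same type, but a
				   mismatched-tags style warning flags the
				   inconsistency that these hunks remove.  */

int
node_count (class loop_like *l)	/* key consistent with the definition */
{
  return l->num_nodes;
}
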
diff --git a/gcc/tree.c b/gcc/tree.c
index 7892c20e5c4..a6099639fb0 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -5130,7 +5130,7 @@ public:
language data removed. The lists are held inside FLD. */
static void
-add_tree_to_fld_list (tree t, struct free_lang_data_d *fld)
+add_tree_to_fld_list (tree t, class free_lang_data_d *fld)
{
if (DECL_P (t))
fld->decls.safe_push (t);
@@ -5143,7 +5143,7 @@ add_tree_to_fld_list (tree t, struct free_lang_data_d *fld)
/* Push tree node T into FLD->WORKLIST. */
static inline void
-fld_worklist_push (tree t, struct free_lang_data_d *fld)
+fld_worklist_push (tree t, class free_lang_data_d *fld)
{
if (t && !is_lang_specific (t) && !fld->pset.contains (t))
fld->worklist.safe_push ((t));
@@ -5197,7 +5197,7 @@ fld_type_variant_equal_p (tree t, tree v, tree inner_type)
Set TREE_TYPE to INNER_TYPE if non-NULL. */
static tree
-fld_type_variant (tree first, tree t, struct free_lang_data_d *fld,
+fld_type_variant (tree first, tree t, class free_lang_data_d *fld,
tree inner_type = NULL)
{
if (first == TYPE_MAIN_VARIANT (t))
@@ -5242,7 +5242,7 @@ static hash_map<tree, tree> *fld_simplified_types;
static tree
fld_process_array_type (tree t, tree t2, hash_map<tree, tree> *map,
- struct free_lang_data_d *fld)
+ class free_lang_data_d *fld)
{
if (TREE_TYPE (t) == t2)
return t;
@@ -5291,7 +5291,7 @@ fld_decl_context (tree ctx)
Return T if no simplification is possible. */
static tree
-fld_incomplete_type_of (tree t, struct free_lang_data_d *fld)
+fld_incomplete_type_of (tree t, class free_lang_data_d *fld)
{
if (!t)
return NULL;
@@ -5383,7 +5383,7 @@ fld_incomplete_type_of (tree t, struct free_lang_data_d *fld)
types. */
static tree
-fld_simplified_type (tree t, struct free_lang_data_d *fld)
+fld_simplified_type (tree t, class free_lang_data_d *fld)
{
if (!t)
return t;
@@ -5441,7 +5441,7 @@ free_lang_data_in_binfo (tree binfo)
/* Reset all language specific information still present in TYPE. */
static void
-free_lang_data_in_type (tree type, struct free_lang_data_d *fld)
+free_lang_data_in_type (tree type, class free_lang_data_d *fld)
{
gcc_assert (TYPE_P (type));
@@ -5657,7 +5657,7 @@ need_assembler_name_p (tree decl)
DECL. */
static void
-free_lang_data_in_decl (tree decl, struct free_lang_data_d *fld)
+free_lang_data_in_decl (tree decl, class free_lang_data_d *fld)
{
gcc_assert (DECL_P (decl));
@@ -5821,7 +5821,7 @@ static tree
find_decls_types_r (tree *tp, int *ws, void *data)
{
tree t = *tp;
- struct free_lang_data_d *fld = (struct free_lang_data_d *) data;
+ class free_lang_data_d *fld = (class free_lang_data_d *) data;
if (TREE_CODE (t) == TREE_LIST)
return NULL_TREE;
@@ -5977,7 +5977,7 @@ find_decls_types_r (tree *tp, int *ws, void *data)
/* Find decls and types in T. */
static void
-find_decls_types (tree t, struct free_lang_data_d *fld)
+find_decls_types (tree t, class free_lang_data_d *fld)
{
while (1)
{
@@ -6019,7 +6019,7 @@ get_eh_types_for_runtime (tree list)
FLD->DECLS and FLD->TYPES. */
static void
-find_decls_types_in_eh_region (eh_region r, struct free_lang_data_d *fld)
+find_decls_types_in_eh_region (eh_region r, class free_lang_data_d *fld)
{
switch (r->type)
{
@@ -6062,7 +6062,7 @@ find_decls_types_in_eh_region (eh_region r, struct free_lang_data_d *fld)
NAMESPACE_DECLs, etc). */
static void
-find_decls_types_in_node (struct cgraph_node *n, struct free_lang_data_d *fld)
+find_decls_types_in_node (struct cgraph_node *n, class free_lang_data_d *fld)
{
basic_block bb;
struct function *fn;
@@ -6131,7 +6131,7 @@ find_decls_types_in_node (struct cgraph_node *n, struct free_lang_data_d *fld)
NAMESPACE_DECLs, etc). */
static void
-find_decls_types_in_var (varpool_node *v, struct free_lang_data_d *fld)
+find_decls_types_in_var (varpool_node *v, class free_lang_data_d *fld)
{
find_decls_types (v->decl, fld);
}
@@ -6182,7 +6182,7 @@ assign_assembler_name_if_needed (tree t)
been set up. */
static void
-free_lang_data_in_cgraph (struct free_lang_data_d *fld)
+free_lang_data_in_cgraph (class free_lang_data_d *fld)
{
struct cgraph_node *n;
varpool_node *v;
@@ -6223,7 +6223,7 @@ static unsigned
free_lang_data (void)
{
unsigned i;
- struct free_lang_data_d fld;
+ class free_lang_data_d fld;
/* If we are the LTO frontend we have freed lang-specific data already. */
if (in_lto_p
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index 66c4bbaad5c..32e6ddd8165 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -345,7 +345,7 @@ stream_out_histogram_value (struct output_block *ob, histogram_value hist)
/* Dump information about HIST to DUMP_FILE. */
void
-stream_in_histogram_value (struct lto_input_block *ib, gimple *stmt)
+stream_in_histogram_value (class lto_input_block *ib, gimple *stmt)
{
enum hist_type type;
unsigned int ncounters = 0;
diff --git a/gcc/value-prof.h b/gcc/value-prof.h
index 9f69d7df6d1..ca846d08cbd 100644
--- a/gcc/value-prof.h
+++ b/gcc/value-prof.h
@@ -108,7 +108,7 @@ extern void gimple_gen_time_profiler (unsigned, unsigned);
extern void gimple_gen_average_profiler (histogram_value, unsigned, unsigned);
extern void gimple_gen_ior_profiler (histogram_value, unsigned, unsigned);
extern void stream_out_histogram_value (struct output_block *, histogram_value);
-extern void stream_in_histogram_value (struct lto_input_block *, gimple *);
+extern void stream_in_histogram_value (class lto_input_block *, gimple *);
extern struct cgraph_node* find_func_by_profile_id (int func_id);
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index 65df25dfb52..67f25c1c795 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -1031,7 +1031,7 @@ use_narrower_mode (rtx x, scalar_int_mode mode, scalar_int_mode wmode)
static rtx
adjust_mems (rtx loc, const_rtx old_rtx, void *data)
{
- struct adjust_mem_data *amd = (struct adjust_mem_data *) data;
+ class adjust_mem_data *amd = (class adjust_mem_data *) data;
rtx mem, addr = loc, tem;
machine_mode mem_mode_save;
bool store_save;
@@ -6389,7 +6389,7 @@ prepare_call_arguments (basic_block bb, rtx_insn *insn)
if (!frame_pointer_needed)
{
- struct adjust_mem_data amd;
+ class adjust_mem_data amd;
amd.mem_mode = VOIDmode;
amd.stack_adjust = -VTI (bb)->out.stack_adjust;
amd.store = true;
@@ -8330,8 +8330,8 @@ static inline rtx
vt_expand_var_loc_chain (variable *var, bitmap regs, void *data,
bool *pendrecp)
{
- struct expand_loc_callback_data *elcd
- = (struct expand_loc_callback_data *) data;
+ class expand_loc_callback_data *elcd
+ = (class expand_loc_callback_data *) data;
location_chain *loc, *next;
rtx result = NULL;
int first_child, result_first_child, last_child;
@@ -8469,8 +8469,8 @@ vt_expand_loc_callback (rtx x, bitmap regs,
int max_depth ATTRIBUTE_UNUSED,
void *data)
{
- struct expand_loc_callback_data *elcd
- = (struct expand_loc_callback_data *) data;
+ class expand_loc_callback_data *elcd
+ = (class expand_loc_callback_data *) data;
decl_or_value dv;
variable *var;
rtx result, subreg;
@@ -8627,7 +8627,7 @@ resolve_expansions_pending_recursion (vec<rtx, va_heap> *pending)
static rtx
vt_expand_loc (rtx loc, variable_table_type *vars)
{
- struct expand_loc_callback_data data;
+ class expand_loc_callback_data data;
rtx result;
if (!MAY_HAVE_DEBUG_BIND_INSNS)
@@ -8649,7 +8649,7 @@ vt_expand_loc (rtx loc, variable_table_type *vars)
static rtx
vt_expand_1pvar (variable *var, variable_table_type *vars)
{
- struct expand_loc_callback_data data;
+ class expand_loc_callback_data data;
rtx loc;
gcc_checking_assert (var->onepart && var->n_var_parts == 1);
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 827a37092fd..e886cdc71b8 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -68,8 +68,8 @@ extern GTY(()) const char *weak_global_object_name;
const char *first_global_object_name;
const char *weak_global_object_name;
-struct addr_const;
-struct constant_descriptor_rtx;
+class addr_const;
+class constant_descriptor_rtx;
struct rtx_constant_pool;
#define n_deferred_constants (crtl->varasm.deferred_constants)
@@ -105,7 +105,7 @@ static int contains_pointers_p (tree);
#ifdef ASM_OUTPUT_EXTERNAL
static bool incorporeal_function_p (tree);
#endif
-static void decode_addr_const (tree, struct addr_const *);
+static void decode_addr_const (tree, class addr_const *);
static hashval_t const_hash_1 (const tree);
static int compare_constant (const tree, const tree);
static void output_constant_def_contents (rtx);
@@ -2906,7 +2906,7 @@ public:
};
static void
-decode_addr_const (tree exp, struct addr_const *value)
+decode_addr_const (tree exp, class addr_const *value)
{
tree target = TREE_OPERAND (exp, 0);
poly_int64 offset = 0;
@@ -3076,7 +3076,7 @@ const_hash_1 (const tree exp)
/* Fallthru. */
case FDESC_EXPR:
{
- struct addr_const value;
+ class addr_const value;
decode_addr_const (exp, &value);
switch (GET_CODE (value.base))
@@ -3272,7 +3272,7 @@ compare_constant (const tree t1, const tree t2)
case ADDR_EXPR:
case FDESC_EXPR:
{
- struct addr_const value1, value2;
+ class addr_const value1, value2;
enum rtx_code code;
int ret;
@@ -3624,7 +3624,7 @@ tree_output_constant_def (tree exp)
class GTY((chain_next ("%h.next"), for_user)) constant_descriptor_rtx {
public:
- struct constant_descriptor_rtx *next;
+ class constant_descriptor_rtx *next;
rtx mem;
rtx sym;
rtx constant;
@@ -3651,8 +3651,8 @@ struct const_rtx_desc_hasher : ggc_ptr_hash<constant_descriptor_rtx>
struct GTY(()) rtx_constant_pool {
/* Pointers to first and last constant in pool, as ordered by offset. */
- struct constant_descriptor_rtx *first;
- struct constant_descriptor_rtx *last;
+ class constant_descriptor_rtx *first;
+ class constant_descriptor_rtx *last;
/* Hash facility for making memory-constants from constant rtl-expressions.
It is used on RISC machines where immediate integer arguments and
@@ -3812,7 +3812,7 @@ simplify_subtraction (rtx x)
rtx
force_const_mem (machine_mode in_mode, rtx x)
{
- struct constant_descriptor_rtx *desc, tmp;
+ class constant_descriptor_rtx *desc, tmp;
struct rtx_constant_pool *pool;
char label[256];
rtx def, symbol;
@@ -3920,7 +3920,7 @@ get_pool_constant (const_rtx addr)
rtx
get_pool_constant_mark (rtx addr, bool *pmarked)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
desc = SYMBOL_REF_CONSTANT (addr);
*pmarked = (desc->mark != 0);
@@ -4028,7 +4028,7 @@ output_constant_pool_2 (fixed_size_mode mode, rtx x, unsigned int align)
giving it ALIGN bits of alignment. */
static void
-output_constant_pool_1 (struct constant_descriptor_rtx *desc,
+output_constant_pool_1 (class constant_descriptor_rtx *desc,
unsigned int align)
{
rtx x, tmp;
@@ -4105,7 +4105,7 @@ output_constant_pool_1 (struct constant_descriptor_rtx *desc,
static void
recompute_pool_offsets (struct rtx_constant_pool *pool)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
pool->offset = 0;
for (desc = pool->first; desc ; desc = desc->next)
@@ -4134,7 +4134,7 @@ mark_constants_in_pattern (rtx insn)
{
if (CONSTANT_POOL_ADDRESS_P (x))
{
- struct constant_descriptor_rtx *desc = SYMBOL_REF_CONSTANT (x);
+ class constant_descriptor_rtx *desc = SYMBOL_REF_CONSTANT (x);
if (desc->mark == 0)
{
desc->mark = 1;
@@ -4203,7 +4203,7 @@ mark_constant_pool (void)
static void
output_constant_pool_contents (struct rtx_constant_pool *pool)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
for (desc = pool->first; desc ; desc = desc->next)
if (desc->mark)
@@ -7452,7 +7452,7 @@ void
place_block_symbol (rtx symbol)
{
unsigned HOST_WIDE_INT size, mask, offset;
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
unsigned int alignment;
struct object_block *block;
tree decl;
@@ -7614,7 +7614,7 @@ get_section_anchor (struct object_block *block, HOST_WIDE_INT offset,
static void
output_object_block (struct object_block *block)
{
- struct constant_descriptor_rtx *desc;
+ class constant_descriptor_rtx *desc;
unsigned int i;
HOST_WIDE_INT offset;
tree decl;
diff --git a/gcc/vr-values.c b/gcc/vr-values.c
index 3f20c1a6fe8..9a4aea0bf86 100644
--- a/gcc/vr-values.c
+++ b/gcc/vr-values.c
@@ -1711,7 +1711,7 @@ compare_range_with_value (enum tree_code comp, value_range *vr, tree val,
for VAR. If so, update VR with the new limits. */
void
-vr_values::adjust_range_with_scev (value_range *vr, struct loop *loop,
+vr_values::adjust_range_with_scev (value_range *vr, class loop *loop,
gimple *stmt, tree var)
{
tree init, step, chrec, tmin, tmax, min, max, type, tem;
@@ -2806,7 +2806,7 @@ vr_values::extract_range_from_phi_node (gphi *phi, value_range *vr_result)
value_range *lhs_vr = get_value_range (lhs);
bool first = true;
int edges, old_edges;
- struct loop *l;
+ class loop *l;
if (dump_file && (dump_flags & TDF_DETAILS))
{
diff --git a/gcc/vr-values.h b/gcc/vr-values.h
index bd67f73701e..3856da1f9a8 100644
--- a/gcc/vr-values.h
+++ b/gcc/vr-values.h
@@ -46,7 +46,7 @@ class vr_values
void set_defs_to_varying (gimple *);
bool update_value_range (const_tree, value_range *);
tree op_with_constant_singleton_value_range (tree);
- void adjust_range_with_scev (value_range *, struct loop *, gimple *, tree);
+ void adjust_range_with_scev (value_range *, class loop *, gimple *, tree);
tree vrp_evaluate_conditional (tree_code, tree, tree, gimple *);
void dump_all_value_ranges (FILE *);
diff --git a/gcc/web.c b/gcc/web.c
index 4a9bec0c6e2..dcc15ca4d78 100644
--- a/gcc/web.c
+++ b/gcc/web.c
@@ -74,7 +74,7 @@ unionfind_union (web_entry_base *first, web_entry_base *second)
return false;
}
-class web_entry : public web_entry_base
+struct web_entry : public web_entry_base
{
private:
rtx reg_pvt;
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 18e76b7ab1f..bb8ced12350 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -329,7 +329,7 @@ typedef generic_wide_int < fixed_wide_int_storage <WIDE_INT_MAX_PRECISION * 2> >
/* wi::storage_ref can be a reference to a primitive type,
so this is the conservatively-correct setting. */
template <bool SE, bool HDP = true>
-struct wide_int_ref_storage;
+class wide_int_ref_storage;
typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;