author    Aldy Hernandez <aldyh@gcc.gnu.org>  2011-11-08 11:13:41 +0000
committer Aldy Hernandez <aldyh@gcc.gnu.org>  2011-11-08 11:13:41 +0000
commit    0a35513e4e73ec9c6f24e791d344308ad3ed030d (patch)
tree      e07de8d0b6265f8d72388d335bd471022e753d57 /gcc
parent    287188ea072dd887a17dd56360531c3a22307e7c (diff)
Merge from transactional-memory branch.
From-SVN: r181154
Diffstat (limited to 'gcc')
-rw-r--r-- gcc/ChangeLog | 198
-rw-r--r-- gcc/Makefile.in | 13
-rw-r--r-- gcc/attribs.c | 19
-rw-r--r-- gcc/builtin-attrs.def | 22
-rw-r--r-- gcc/builtin-types.def | 21
-rw-r--r-- gcc/builtins.def | 13
-rw-r--r-- gcc/c-family/ChangeLog | 16
-rw-r--r-- gcc/c-family/c-common.c | 241
-rw-r--r-- gcc/c-family/c-common.h | 25
-rw-r--r-- gcc/c-parser.c | 353
-rw-r--r-- gcc/c-tree.h | 1
-rw-r--r-- gcc/c-typeck.c | 16
-rw-r--r-- gcc/calls.c | 85
-rw-r--r-- gcc/cfgbuild.c | 32
-rw-r--r-- gcc/cfgexpand.c | 42
-rw-r--r-- gcc/cfgrtl.c | 2
-rw-r--r-- gcc/cgraph.c | 2
-rw-r--r-- gcc/cgraph.h | 20
-rw-r--r-- gcc/cgraphunit.c | 4
-rw-r--r-- gcc/combine.c | 1
-rw-r--r-- gcc/common.opt | 4
-rw-r--r-- gcc/config/i386/i386-builtin-types.def | 8
-rw-r--r-- gcc/config/i386/i386.c | 195
-rw-r--r-- gcc/cp/ChangeLog | 37
-rw-r--r-- gcc/cp/call.c | 3
-rw-r--r-- gcc/cp/class.c | 154
-rw-r--r-- gcc/cp/cp-tree.h | 8
-rw-r--r-- gcc/cp/decl.c | 2
-rw-r--r-- gcc/cp/except.c | 49
-rw-r--r-- gcc/cp/parser.c | 339
-rw-r--r-- gcc/cp/parser.h | 4
-rw-r--r-- gcc/cp/pt.c | 22
-rw-r--r-- gcc/cp/semantics.c | 59
-rw-r--r-- gcc/doc/invoke.texi | 20
-rw-r--r-- gcc/doc/tm.texi | 8
-rw-r--r-- gcc/doc/tm.texi.in | 4
-rw-r--r-- gcc/emit-rtl.c | 1
-rw-r--r-- gcc/gimple-low.c | 13
-rw-r--r-- gcc/gimple-pretty-print.c | 170
-rw-r--r-- gcc/gimple.c | 137
-rw-r--r-- gcc/gimple.def | 14
-rw-r--r-- gcc/gimple.h | 168
-rw-r--r-- gcc/gimplify.c | 59
-rw-r--r-- gcc/gsstruct.def | 2
-rw-r--r-- gcc/gtm-builtins.def | 208
-rw-r--r-- gcc/ipa-inline.c | 8
-rw-r--r-- gcc/omp-low.c | 1
-rw-r--r-- gcc/opts.c | 2
-rw-r--r-- gcc/output.h | 4
-rw-r--r-- gcc/params.def | 7
-rw-r--r-- gcc/passes.c | 10
-rw-r--r-- gcc/print-tree.c | 2
-rw-r--r-- gcc/recog.c | 1
-rw-r--r-- gcc/reg-notes.def | 5
-rw-r--r-- gcc/rtlanal.c | 1
-rw-r--r-- gcc/target.def | 18
-rw-r--r-- gcc/targhooks.c | 6
-rw-r--r-- gcc/targhooks.h | 3
-rw-r--r-- gcc/testsuite/ChangeLog | 11
-rw-r--r-- gcc/testsuite/c-c++-common/tm/20100127.c | 36
-rw-r--r-- gcc/testsuite/c-c++-common/tm/abort-1.c | 6
-rw-r--r-- gcc/testsuite/c-c++-common/tm/abort-2.c | 11
-rw-r--r-- gcc/testsuite/c-c++-common/tm/abort-3.c | 8
-rw-r--r-- gcc/testsuite/c-c++-common/tm/atomic-1.c | 9
-rw-r--r-- gcc/testsuite/c-c++-common/tm/atomic-2.c | 10
-rw-r--r-- gcc/testsuite/c-c++-common/tm/attrib-1.c | 25
-rw-r--r-- gcc/testsuite/c-c++-common/tm/cancel-1.c | 17
-rw-r--r-- gcc/testsuite/c-c++-common/tm/freq.c | 14
-rw-r--r-- gcc/testsuite/c-c++-common/tm/inline-asm-2.c | 8
-rw-r--r-- gcc/testsuite/c-c++-common/tm/inline-asm.c | 17
-rw-r--r-- gcc/testsuite/c-c++-common/tm/ipa-1.c | 23
-rw-r--r-- gcc/testsuite/c-c++-common/tm/malloc.c | 24
-rw-r--r-- gcc/testsuite/c-c++-common/tm/memcpy-1.c | 9
-rw-r--r-- gcc/testsuite/c-c++-common/tm/omp.c | 22
-rw-r--r-- gcc/testsuite/c-c++-common/tm/outer-1.c | 31
-rw-r--r-- gcc/testsuite/c-c++-common/tm/safe-1.c | 69
-rw-r--r-- gcc/testsuite/c-c++-common/tm/safe-2.c | 43
-rw-r--r-- gcc/testsuite/c-c++-common/tm/safe-3.c | 48
-rw-r--r-- gcc/testsuite/c-c++-common/tm/trxn-expr-2.c | 15
-rw-r--r-- gcc/testsuite/c-c++-common/tm/trxn-expr.c | 13
-rw-r--r-- gcc/testsuite/c-c++-common/tm/wrap-1.c | 10
-rw-r--r-- gcc/testsuite/g++.dg/dg.exp | 1
-rw-r--r-- gcc/testsuite/g++.dg/tm/20100429.C | 15
-rw-r--r-- gcc/testsuite/g++.dg/tm/20100727.C | 796
-rw-r--r-- gcc/testsuite/g++.dg/tm/alias.C | 20
-rw-r--r-- gcc/testsuite/g++.dg/tm/attrib-2.C | 22
-rw-r--r-- gcc/testsuite/g++.dg/tm/attrib-3.C | 33
-rw-r--r-- gcc/testsuite/g++.dg/tm/attrib-4.C | 48
-rw-r--r-- gcc/testsuite/g++.dg/tm/fatomic-1.C | 10
-rw-r--r-- gcc/testsuite/g++.dg/tm/nested-1.C | 22
-rw-r--r-- gcc/testsuite/g++.dg/tm/nested-2.C | 41
-rw-r--r-- gcc/testsuite/g++.dg/tm/nested-3.C | 43
-rw-r--r-- gcc/testsuite/g++.dg/tm/opt-1.C | 9
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr45940-2.C | 30
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr45940-3.C | 69
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr45940-4.C | 69
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr45940.C | 30
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46269.C | 29
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46270.C | 27
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46300.C | 8
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46567.C | 2676
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46646.C | 890
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46653.C | 18
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46714.C | 14
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr46941.C | 37
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr47340.C | 11
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr47530.C | 79
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr47554.C | 27
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr47573.C | 25
-rw-r--r-- gcc/testsuite/g++.dg/tm/pr47746.C | 27
-rw-r--r-- gcc/testsuite/g++.dg/tm/template-1.C | 35
-rw-r--r-- gcc/testsuite/g++.dg/tm/tm.exp | 39
-rw-r--r-- gcc/testsuite/g++.dg/tm/vector-1.C | 15
-rw-r--r-- gcc/testsuite/g++.dg/tm/wrap-2.C | 16
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20091013.c | 13
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20091221.c | 15
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100125.c | 17
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100519.c | 17
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100524-2.c | 20
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100603.c | 21
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100609.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100610.c | 90
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100615-2.c | 19
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20100615.c | 42
-rw-r--r-- gcc/testsuite/gcc.dg/tm/20110216.c | 15
-rw-r--r-- gcc/testsuite/gcc.dg/tm/alias-1.c | 40
-rw-r--r-- gcc/testsuite/gcc.dg/tm/alias-2.c | 42
-rw-r--r-- gcc/testsuite/gcc.dg/tm/data-1.c | 48
-rw-r--r-- gcc/testsuite/gcc.dg/tm/data-2.c | 22
-rw-r--r-- gcc/testsuite/gcc.dg/tm/debug-1.c | 26
-rw-r--r-- gcc/testsuite/gcc.dg/tm/indirect-1.c | 9
-rw-r--r-- gcc/testsuite/gcc.dg/tm/ipa-1.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/ipa-2.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/ipa-3.c | 12
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-1.c | 16
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-2.c | 21
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-3.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-4.c | 16
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-5.c | 27
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-6.c | 34
-rw-r--r-- gcc/testsuite/gcc.dg/tm/irrevocable-7.c | 13
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-1.c | 29
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-10.c | 28
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-11.c | 29
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-12.c | 34
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-13.c | 15
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-15.c | 30
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-2.c | 15
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-3.c | 20
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-4.c | 24
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-5.c | 23
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-6.c | 20
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-7.c | 22
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-8.c | 26
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memopt-9.c | 29
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memset-2.c | 17
-rw-r--r-- gcc/testsuite/gcc.dg/tm/memset.c | 24
-rw-r--r-- gcc/testsuite/gcc.dg/tm/nested-1.c | 28
-rw-r--r-- gcc/testsuite/gcc.dg/tm/nested-2.c | 19
-rw-r--r-- gcc/testsuite/gcc.dg/tm/opt-1.c | 43
-rw-r--r-- gcc/testsuite/gcc.dg/tm/opt-2.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr45985.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr46567-2.c | 18
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr46567.c | 18
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr46654.c | 26
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr47520.c | 29
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr47690.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/pr47905.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/props-1.c | 19
-rw-r--r-- gcc/testsuite/gcc.dg/tm/props-2.c | 20
-rw-r--r-- gcc/testsuite/gcc.dg/tm/props-3.c | 15
-rw-r--r-- gcc/testsuite/gcc.dg/tm/props-4.c | 27
-rw-r--r-- gcc/testsuite/gcc.dg/tm/tm.exp | 39
-rw-r--r-- gcc/testsuite/gcc.dg/tm/unsafe.c | 13
-rw-r--r-- gcc/testsuite/gcc.dg/tm/unused.c | 15
-rw-r--r-- gcc/testsuite/gcc.dg/tm/vector-1.c | 18
-rw-r--r-- gcc/testsuite/gcc.dg/tm/wrap-2.c | 16
-rw-r--r-- gcc/testsuite/gcc.dg/tm/wrap-3.c | 14
-rw-r--r-- gcc/testsuite/gcc.dg/tm/wrap-4.c | 15
-rw-r--r-- gcc/timevar.def | 1
-rw-r--r-- gcc/toplev.c | 1
-rw-r--r-- gcc/trans-mem.c | 4914
-rw-r--r-- gcc/trans-mem.h | 35
-rw-r--r-- gcc/tree-cfg.c | 109
-rw-r--r-- gcc/tree-eh.c | 173
-rw-r--r-- gcc/tree-flow.h | 12
-rw-r--r-- gcc/tree-inline.c | 17
-rw-r--r-- gcc/tree-inline.h | 3
-rw-r--r-- gcc/tree-pass.h | 7
-rw-r--r-- gcc/tree-pretty-print.c | 20
-rw-r--r-- gcc/tree-ssa-alias.c | 42
-rw-r--r-- gcc/tree-ssa-structalias.c | 47
-rw-r--r-- gcc/tree.c | 36
-rw-r--r-- gcc/tree.def | 4
-rw-r--r-- gcc/tree.h | 81
-rw-r--r-- gcc/varasm.c | 156
196 files changed, 15520 insertions, 214 deletions
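
The patch below merges GCC's transactional-memory language extension. As a quick
orientation, here is a minimal sketch of the source-level syntax that the new
-fgnu-tm support accepts; the file name, variable and functions are illustrative
and not taken from the patch:

    /* Compile with: gcc -fgnu-tm -c example.c  */
    static int counter;

    void
    increment (void)
    {
      /* Executes atomically with respect to all other transactions.  */
      __transaction_atomic { counter++; }
    }

    void
    increment_relaxed (void)
    {
      /* A relaxed transaction may also contain code that is not
         transaction-safe; the runtime falls back to serial irrevocable
         mode when needed.  */
      __transaction_relaxed { counter++; }
    }
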
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index cfde0b63175..e6816e58628 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,4 +1,202 @@
2011-11-07 Richard Henderson <rth@redhat.com>
+ Aldy Hernandez <aldyh@redhat.com>
+ Andrew MacLeod <amacleod@redhat.com>
+ Torvald Riegel <triegel@redhat.com>
+
+ Merged from transactional-memory.
+
+ * gtm-builtins.def: New file.
+ * trans-mem.c: New file.
+ * trans-mem.h: New file.
+
+ * opts.c (finish_options): Error out when using -flto and
+ -fgnu-tm.
+
+ * config/i386/i386.c: Define TARGET_VECTORIZE* transactional variants.
+ (ix86_handle_tm_regparm_attribute, struct bdesc_tm,
+ ix86_builtin_tm_load, ix86_builtin_tm_store,
+ ix86_init_tm_builtins): New.
+ (ix86_init_builtins): Initialize TM builtins.
+ (struct ix86_attribute_table): Add "*tm regparm".
+ * config/i386/i386-builtin-types.def (PV2SI): Define.
+ (PCV2SI): Define.
+ Define V2SI_FTYPE_PCV2SI.
+ Define V4SF_FTYPE_PCV4SF.
+ Define V8SF_FTYPE_PCV8SF.
+ Define VOID_PV2SI_V2SI.
+
+ * doc/invoke.texi (C Dialect Options): Document -fgnu-tm and
+ tm-max-aggregate-size.
+ * doc/tm.texi.in: Add TARGET_VECTORIZE_BUILTIN_TM_LOAD and
+ TARGET_VECTORIZE_BUILTIN_TM_STORE hooks.
+ * doc/tm.texi: Regenerate.
+
+ * attribs.c (apply_tm_attr): New.
+ (init_attributes): Allow '*' prefix for overrides.
+ (register_attribute): Likewise.
+ * builtin-attrs.def (ATTR_TM_TMPURE, ATTR_TM_REGPARM): New.
+ (ATTR_TM_NOTHROW_LIST, ATTR_TM_TMPURE_NOTHROW_LIST,
+ ATTR_TM_PURE_TMPURE_NOTHROW_LIST, ATTR_TM_NORETURN_NOTHROW_LIST,
+ ATTR_TM_CONST_NOTHROW_LIST, ATTR_TMPURE_MALLOC_NOTHROW_LIST,
+ ATTR_TMPURE_NOTHROW_LIST): New.
+ * builtin-types.def (BT_FN_I[1248]_VPTR, BT_FN_FLOAT_VPTR,
+ BT_FN_DOUBLE_VPTR, BT_FN_LDOUBLE_VPTR, BT_FN_VOID_VPTR_I[1248],
+ BT_FN_VOID_VPTR_FLOAT, BT_FN_VOID_VPTR_DOUBLE,
+ BT_FN_VOID_VPTR_LDOUBLE, BT_FN_VOID_VPTR_SIZE): New.
+ * builtins.def: Include gtm-builtins.def. Add comments regarding
+ transactional memory synchronization.
+ (DEF_TM_BUILTIN): New.
+ * c-parser.c (struct c_parser): Add in_transaction.
+ (c_parser_transaction, c_parser_transaction_expression,
+ c_parser_transaction_cancel, c_parser_transaction_attributes): New.
+ (c_parser_attribute_any_word): Split out from c_parser_attributes.
+ (c_parser_statement_after_labels): Handle RID_TRANSACTION*.
+ (c_parser_unary_expression): Same.
+ * c-tree.h (c_finish_transaction): Declare.
+ * c-typeck.c (c_finish_transaction): New.
+ (build_function_call_vec): Call tm_malloc_replacement.
+ * calls.c (is_tm_builtin): New.
+ (flags_from_decl_or_type): Add ECF_TM_BUILTIN and ECF_TM_PURE.
+ * cfgbuild.c (make_edges): Add edges for REG_TM notes.
+ * cfgexpand.c (expand_call_stmt): Call
+ mark_transaction_restart_calls.
+ (gimple_expand_cfg): Free the tm_restart map.
+ (mark_transaction_restart_calls): New.
+ * cfgrtl.c (purge_dead_edges): Look for REG_TM notes.
+ * cgraph.c (dump_cgraph_node): Handle tm_clone.
+ * cgraph.h (struct cgraph_node): Add tm_clone field.
+ (decl_is_tm_clone): New.
+ (struct cgraph_local_info): Add tm_may_enter_irr.
+ (cgraph_copy_node_for_versioning): Declare.
+ * cgraphunit.c (cgraph_copy_node_for_versioning): Export;
+ copy analyzed from old version.
+ * combine.c (distribute_notes): Handle REG_TM notes.
+ * common.opt: Add -fgnu-tm.
+ * crtstuff.c (__TMC_LIST__, __TMC_END__): New.
+ (__do_global_dtors_aux): Deregister clone table.
+ (frame_dummy): Register clone table.
+ * emit-rtl.c (try_split): Handle REG_TM. Early return if no function
+ body.
+ * gimple-low.c (lower_stmt): Handle GIMPLE_EH_ELSE and
+ GIMPLE_TRANSACTION.
+ (gimple_stmt_may_fallthru): Handle GIMPLE_EH_ELSE.
+ * gimple-pretty-print.c: Include trans-mem.h.
+ (dump_gimple_fmt): Add %x.
+ (dump_gimple_call): Dump arguments for calls to _ITM_beginTransaction.
+ (dump_gimple_eh_else, dump_gimple_transaction): New.
+ (dump_gimple_stmt): Handle GIMPLE_EH_ELSE and GIMPLE_TRANSACTION.
+ * gimple.c (gimple_build_eh_else, gimple_build_transaction): New.
+ (walk_gimple_seq): Honor removed_stmt. Document usage of removed_stmt
+ field.
+ (walk_gimple_op): Handle GIMPLE_TRANSACTION.
+ (walk_gimple_stmt): Initialize and honor removed_stmt.
+ Handle GIMPLE_EH_ELSE and GIMPLE_TRANSACTION.
+ (gimple_copy): Handle GIMPLE_EH_ELSE and GIMPLE_TRANSACTION.
+ * gimple.def (GIMPLE_TRANSACTION, GIMPLE_EH_ELSE): New.
+ * gimple.h (struct gimple_statement_eh_else,
+ gimple_statement_transaction, GTMA_*): New.
+ (gimple_statement_d): Add gimple_statement_eh_else and
+ gimple_transaction.
+ (gimple_build_eh_else, gimple_build_transaction,
+ gimple_fold_call, diagnose_tm_safe_errors): Declare.
+ (get_call_expr_in): Remove prototype.
+ (gimple_has_substatements): Add GIMPLE_EH_ELSE and GIMPLE_TRANSACTION.
+ (gimple_eh_else_n_body, gimple_eh_else_e_body,
+ gimple_eh_else_set_n_body, gimple_eh_else_set_e_body,
+ gimple_transaction_body, gimple_transaction_label,
+ gimple_transaction_label_ptr, gimple_transaction_subcode,
+ gimple_transaction_set_body, gimple_transaction_set_label,
+ gimple_transaction_set_subcode): New.
+ (struct walk_stmt_info): Use BOOL_BITFIELD; add removed_stmt.
+ * gimplify.c (create_tmp_var_name): Use clean_symbol_name.
+ (voidify_wrapper_expr): Handle TRANSACTION_EXPR.
+ (gimplify_transaction): New.
+ (gimplify_expr): Handle TRANSACTION_EXPR.
+ * gsstruct.def (GSS_EH_ELSE, GSS_TRANSACTION): New.
+ * ipa-inline.c (can_inline_edge_p): Do not inline TM safe calling
+ TM pure functions.
+ * Makefile.in: Add trans-mem.o and dependencies.
+ (BUILTINS_DEF): Add gtm-builtins.def.
+ (gimple-pretty-print.o): Depend on TRANS_MEM_H.
+ (GTFILES): Add trans-mem.c.
+ * omp-low.c (WALK_SUBSTMTS): Add GIMPLE_TRANSACTION.
+ * output.h (record_tm_clone_pair, finish_tm_clone_pairs,
+ get_tm_clone_pair): Declare.
+ * params.def (PARAM_TM_MAX_AGGREGATE_SIZE): New.
+ * passes.c (init_optimization_passes): Place transactional memory
+ passes.
+ * print-tree.c (print_node): Dump tm-clone.
+ * recog.c (peep2_attempt): Handle REG_TM.
+ * reg-notes.def (TM): New.
+ * rtlanal.c (alloc_reg_note): Handle REG_TM.
+ * target.def (builtin_tm_load, builtin_tm_store): New.
+ * targhooks.c (default_builtin_tm_load_store): New.
+ * targhooks.h (default_builtin_tm_load_store): Declare.
+ * timevar.def (TV_TRANS_MEM): New.
+ * toplev.c (compile_file): Call finish_tm_clone_pairs.
+ * tree-cfg.c (make_edges): Handle GIMPLE_TRANSACTION.
+ (cleanup_dead_labels): Handle GIMPLE_TRANSACTION. Avoid unnecessary
+ writes into the statements to update labels.
+ (is_ctrl_altering_stmt): Add TM ending statements. Handle
+ GIMPLE_TRANSACTION.
+ (verify_gimple_transaction): New.
+ (verify_gimple_stmt): Handle GIMPLE_TRANSACTION.
+ (verify_gimple_in_seq_2): Handle GIMPLE_EH_ELSE and GIMPLE_TRANSACTION.
+ (gimple_redirect_edge_and_branch): Handle TM_TRANSACTION.
+ (dump_function_to_file): Display [tm-clone] if applicable.
+ * tree-eh.c (struct_ptr_eq): Make inline and move to tree.h.
+ (struct_ptr_hash): Same.
+ (collect_finally_tree): Handle GIMPLE_EH_ELSE.
+ (replace_goto_queue_1): Likewise.
+ (get_eh_else): New.
+ (honor_protect_cleanup_actions): Handle GIMPLE_EH_ELSE.
+ (lower_try_finally_nofallthru): Likewise.
+ (lower_try_finally_onedest): Likewise.
+ (lower_try_finally_copy): Likewise.
+ (lower_try_finally_switch): Likewise.
+ (lower_try_finally): Likewise.
+ (decide_copy_try_finally): Likewise.
+ (lower_eh_constructs_2): Likewise.
+ (refactor_eh_r): Likewise.
+ * tree-flow.h (struct gimple_df): Add tm_restart field.
+ Define tm_restart_node.
+ * tree-inline.c (remap_gimple_stmt): Handle GIMPLE_TRANSACTION.
+ (estimate_num_insns): Likewise.
+ (init_inline_once): Init tm_cost.
+ * tree-inline.h (struct eni_weights_d): Add tm_cost.
+ * tree-pass.h (pass_diagnose_tm_blocks, pass_lower_tm, pass_tm_init,
+ pass_tm_mark, pass_tm_memopt, pass_tm_edges, pass_ipa_tm): Declare.
+ * tree-pretty-print.c (dump_generic_node): Handle TRANSACTION_EXPR.
+ * tree-ssa-alias.c (ref_maybe_used_by_call_p_1): Handle
+ BUILT_IN_TM_MEMSET, BUILT_IN_TM_MEMCPY, BUILT_IN_TM_MEMMOVE.
+ Add support for TM vector loads. Add support for TM logging builtins.
+ (call_may_clobber_ref_p_1): Add support for vector stores.
+ * tree-ssa-structalias.c (find_func_aliases): Add support for TM
+ vector stores and loads. Handle BUILT_IN_TM_MEMSET,
+ BUILT_IN_TM_MEMCPY, BUILT_IN_TM_MEMMOVE.
+ * tree.c (strip_invariant_refs): Moved from gimple.c to here.
+ (local_define_builtin): Handle ECF_TM_PURE.
+ (build_common_builtin_nodes): Set __builtin_eh_pointer to ECF_TM_PURE.
+ * tree.def (TRANSACTION_EXPR): New.
+ * tree.h (strip_invariant_refs): Moved from gimple.h to here.
+ (TRANSACTION_EXPR_BODY, TRANSACTION_EXPR_CHECK,
+ TRANSACTION_EXPR_OUTER, TRANSACTION_EXPR_RELAXED,
+ BUILTIN_TM_LOAD_STORE_P, BUILTIN_TM_LOAD_P, BUILTIN_TM_STORE_P,
+ CASE_BUILT_IN_TM_LOAD, CASE_BUILT_IN_TM_STORE): New.
+ (ECF_TM_PURE, ECF_TM_BUILTIN): New.
+ (struct tree_function_decl): Add tm_clone_flag.
+ (struct_ptr_eq, struct_ptr_hash): New.
+ (apply_tm_attr): Declare.
+ (is_tm_safe_or_pure): New.
+ (build_tm_abort_call, is_tm_safe, is_tm_pure,
+ is_tm_may_cancel_outer, is_tm_ending_fndecl, record_tm_replacement,
+ tm_malloc_replacement): Declare.
+ * varasm.c (tm_clone_hash): New.
+ (record_tm_clone_pair, finish_tm_clone_pairs, get_tm_clone_pair,
+ dump_tm_clone_to_vec, dump_tm_clone_pairs, tm_alias_pair_cmp): New.
+ (struct tm_alias_pair): New. Declare VEC types for object.
+
+2011-11-07 Richard Henderson <rth@redhat.com>
* optabs.h (OTI_sync_compare_and_swap, OTI_sync_lock_test_and_set,
OTI_sync_old_add, OTI_sync_old_sub, OTI_sync_old_ior,
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 20bb98ae92c..9ec2df1485b 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -856,7 +856,8 @@ RTL_H = $(RTL_BASE_H) genrtl.h vecir.h
RTL_ERROR_H = $(RTL_H) $(DIAGNOSTIC_CORE_H)
READ_MD_H = $(OBSTACK_H) $(HASHTAB_H) read-md.h
PARAMS_H = params.h params.def
-BUILTINS_DEF = builtins.def sync-builtins.def omp-builtins.def
+BUILTINS_DEF = builtins.def sync-builtins.def omp-builtins.def \
+ gtm-builtins.def
INTERNAL_FN_DEF = internal-fn.def
INTERNAL_FN_H = internal-fn.h $(INTERNAL_FN_DEF)
TREE_H = tree.h all-tree.def tree.def c-family/c-common.def \
@@ -869,6 +870,7 @@ BASIC_BLOCK_H = basic-block.h $(PREDICT_H) $(VEC_H) $(FUNCTION_H) cfghooks.h
GIMPLE_H = gimple.h gimple.def gsstruct.def pointer-set.h $(VEC_H) \
vecir.h $(GGC_H) $(BASIC_BLOCK_H) $(TARGET_H) tree-ssa-operands.h \
tree-ssa-alias.h $(INTERNAL_FN_H)
+TRANS_MEM_H = trans-mem.h
GCOV_IO_H = gcov-io.h gcov-iov.h auto-host.h
COVERAGE_H = coverage.h $(GCOV_IO_H)
DEMANGLE_H = $(srcdir)/../include/demangle.h
@@ -1352,6 +1354,7 @@ OBJS = \
timevar.o \
toplev.o \
tracer.o \
+ trans-mem.o \
tree-affine.o \
tree-call-cdce.o \
tree-cfg.o \
@@ -2158,6 +2161,12 @@ gtype-desc.o: gtype-desc.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(CFGLOOP_H) $(TARGET_H) $(IPA_PROP_H) $(LTO_STREAMER_H) \
target-globals.h
+trans-mem.o : trans-mem.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
+ $(TREE_H) $(GIMPLE_H) $(TREE_FLOW_H) $(TREE_PASS_H) $(TREE_INLINE_H) \
+ $(DIAGNOSTIC_CORE_H) $(DEMANGLE_H) output.h $(TRANS_MEM_H) \
+ $(PARAMS_H) $(TARGET_H) langhooks.h \
+ tree-pretty-print.h gimple-pretty-print.h
+
ggc-common.o: ggc-common.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
$(GGC_H) $(HASHTAB_H) $(DIAGNOSTIC_CORE_H) $(PARAMS_H) hosthooks.h \
$(HOSTHOOKS_DEF_H) $(VEC_H) $(PLUGIN_H) $(GGC_INTERNAL_H) $(TIMEVAR_H)
@@ -2684,6 +2693,7 @@ gimple.o : gimple.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TREE_H) \
gimple-pretty-print.o : gimple-pretty-print.c $(CONFIG_H) $(SYSTEM_H) \
$(TREE_H) $(DIAGNOSTIC_H) $(HASHTAB_H) $(TREE_FLOW_H) \
$(TM_H) coretypes.h $(TREE_PASS_H) $(GIMPLE_H) value-prof.h \
+ $(TRANS_MEM_H) \
tree-pretty-print.h gimple-pretty-print.h
tree-mudflap.o : $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TREE_INLINE_H) \
$(GIMPLE_H) $(DIAGNOSTIC_H) $(DEMANGLE_H) $(HASHTAB_H) langhooks.h tree-mudflap.h \
@@ -3733,6 +3743,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/lto-symtab.c \
$(srcdir)/tree-ssa-alias.h \
$(srcdir)/ipa-prop.h \
+ $(srcdir)/trans-mem.c \
$(srcdir)/lto-streamer.h \
$(srcdir)/target-globals.h \
$(srcdir)/ipa-inline.h \
diff --git a/gcc/attribs.c b/gcc/attribs.c
index 9448c0c6beb..0e94fd29cb5 100644
--- a/gcc/attribs.c
+++ b/gcc/attribs.c
@@ -166,7 +166,8 @@ init_attributes (void)
gcc_assert (strcmp (attribute_tables[i][j].name,
attribute_tables[i][k].name));
}
- /* Check that no name occurs in more than one table. */
+ /* Check that no name occurs in more than one table. Names that
+ begin with '*' are exempt, and may be overridden. */
for (i = 0; i < ARRAY_SIZE (attribute_tables); i++)
{
size_t j, k, l;
@@ -174,8 +175,9 @@ init_attributes (void)
for (j = i + 1; j < ARRAY_SIZE (attribute_tables); j++)
for (k = 0; attribute_tables[i][k].name != NULL; k++)
for (l = 0; attribute_tables[j][l].name != NULL; l++)
- gcc_assert (strcmp (attribute_tables[i][k].name,
- attribute_tables[j][l].name));
+ gcc_assert (attribute_tables[i][k].name[0] == '*'
+ || strcmp (attribute_tables[i][k].name,
+ attribute_tables[j][l].name));
}
#endif
@@ -207,7 +209,7 @@ register_attribute (const struct attribute_spec *attr)
slot = htab_find_slot_with_hash (attribute_hash, &str,
substring_hash (str.str, str.length),
INSERT);
- gcc_assert (!*slot);
+ gcc_assert (!*slot || attr->name[0] == '*');
*slot = (void *) CONST_CAST (struct attribute_spec *, attr);
}
@@ -484,3 +486,12 @@ decl_attributes (tree *node, tree attributes, int flags)
return returned_attrs;
}
+
+/* Subroutine of set_method_tm_attributes. Apply TM attribute ATTR
+ to the method FNDECL. */
+
+void
+apply_tm_attr (tree fndecl, tree attr)
+{
+ decl_attributes (&TREE_TYPE (fndecl), tree_cons (attr, NULL, NULL), 0);
+}
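
The '*' exemption above is what allows a backend to re-register an internal
attribute with a real handler. A sketch of such an override, assuming the
attribute_spec field layout visible in the c-common attribute table later in
this patch and the ix86_handle_tm_regparm_attribute handler named in the
ChangeLog; the actual i386 table contents are not shown in this section:

    /* Hypothetical excerpt from config/i386/i386.c.  The generic table maps
       "*tm regparm" to ignore_attribute; because the name starts with '*',
       init_attributes and register_attribute now allow the target table to
       claim it, and the '*' also keeps it out of user source code.  */
    static const struct attribute_spec ix86_attribute_table[] =
    {
      /* ... existing i386 entries ... */
      { "*tm regparm", 0, 0, false, true, true,
        ix86_handle_tm_regparm_attribute, false },
      /* ... */
    };
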
diff --git a/gcc/builtin-attrs.def b/gcc/builtin-attrs.def
index d0c3d9605ea..619794e85c8 100644
--- a/gcc/builtin-attrs.def
+++ b/gcc/builtin-attrs.def
@@ -96,6 +96,8 @@ DEF_ATTR_IDENT (ATTR_SENTINEL, "sentinel")
DEF_ATTR_IDENT (ATTR_STRFMON, "strfmon")
DEF_ATTR_IDENT (ATTR_STRFTIME, "strftime")
DEF_ATTR_IDENT (ATTR_TYPEGENERIC, "type generic")
+DEF_ATTR_IDENT (ATTR_TM_REGPARM, "*tm regparm")
+DEF_ATTR_IDENT (ATTR_TM_TMPURE, "transaction_pure")
DEF_ATTR_TREE_LIST (ATTR_NOVOPS_LIST, ATTR_NOVOPS, ATTR_NULL, ATTR_NULL)
@@ -227,6 +229,26 @@ DEF_FORMAT_ATTRIBUTE_NOTHROW(STRFMON,3,3_4)
#undef DEF_FORMAT_ATTRIBUTE_NOTHROW
#undef DEF_FORMAT_ATTRIBUTE_BOTH
+/* Transactional memory variants of the above. */
+
+DEF_ATTR_TREE_LIST (ATTR_TM_NOTHROW_LIST,
+ ATTR_TM_REGPARM, ATTR_NULL, ATTR_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_TMPURE_NOTHROW_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_TM_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_PURE_TMPURE_NOTHROW_LIST,
+ ATTR_PURE, ATTR_NULL, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_NORETURN_NOTHROW_LIST,
+ ATTR_TM_REGPARM, ATTR_NULL, ATTR_NORETURN_NOTHROW_LIST)
+DEF_ATTR_TREE_LIST (ATTR_TM_CONST_NOTHROW_LIST,
+ ATTR_TM_REGPARM, ATTR_NULL, ATTR_CONST_NOTHROW_LIST)
+
+/* Same attributes used for BUILT_IN_MALLOC except with TM_PURE thrown in. */
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_MALLOC_NOTHROW_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_MALLOC_NOTHROW_LIST)
+/* Same attributes used for BUILT_IN_FREE except with TM_PURE thrown in. */
+DEF_ATTR_TREE_LIST (ATTR_TMPURE_NOTHROW_LIST,
+ ATTR_TM_TMPURE, ATTR_NULL, ATTR_NOTHROW_LIST)
+
/* Construct a tree for a format_arg attribute. */
#define DEF_FORMAT_ARG_ATTRIBUTE(FA) \
DEF_ATTR_TREE_LIST (ATTR_FORMAT_ARG_##FA, ATTR_FORMAT_ARG, \
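
In user-attribute terms, ATTR_TMPURE_MALLOC_NOTHROW_LIST (the malloc attribute
list with transaction_pure added, per the comment above) corresponds roughly to
the declaration below; this is only an approximation for illustration, since the
internal lists are tree chains rather than source-level attributes:

    void *malloc (__SIZE_TYPE__ size)
      __attribute__ ((transaction_pure, malloc, nothrow));
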
diff --git a/gcc/builtin-types.def b/gcc/builtin-types.def
index a6d0127dadc..8edf744461e 100644
--- a/gcc/builtin-types.def
+++ b/gcc/builtin-types.def
@@ -530,3 +530,24 @@ DEF_FUNCTION_TYPE_VAR_5 (BT_FN_INT_INT_INT_INT_INT_INT_VAR,
DEF_POINTER_TYPE (BT_PTR_FN_VOID_VAR, BT_FN_VOID_VAR)
DEF_FUNCTION_TYPE_3 (BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE,
BT_PTR, BT_PTR_FN_VOID_VAR, BT_PTR, BT_SIZE)
+
+
+DEF_FUNCTION_TYPE_1 (BT_FN_I1_VPTR, BT_I1, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I2_VPTR, BT_I2, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I4_VPTR, BT_I4, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_I8_VPTR, BT_I8, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_FLOAT_VPTR, BT_FLOAT, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_DOUBLE_VPTR, BT_DOUBLE, BT_VOLATILE_PTR)
+DEF_FUNCTION_TYPE_1 (BT_FN_LDOUBLE_VPTR, BT_LONGDOUBLE, BT_VOLATILE_PTR)
+
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I1, BT_VOID, BT_VOLATILE_PTR, BT_I1)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I2, BT_VOID, BT_VOLATILE_PTR, BT_I2)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I4, BT_VOID, BT_VOLATILE_PTR, BT_I4)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_I8, BT_VOID, BT_VOLATILE_PTR, BT_I8)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_FLOAT, BT_VOID, BT_VOLATILE_PTR, BT_FLOAT)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_DOUBLE, BT_VOID,
+ BT_VOLATILE_PTR, BT_DOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_LDOUBLE, BT_VOID,
+ BT_VOLATILE_PTR, BT_LONGDOUBLE)
+DEF_FUNCTION_TYPE_2 (BT_FN_VOID_VPTR_SIZE, BT_VOID,
+ BT_VOLATILE_PTR, BT_SIZE)
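
Spelled as ordinary C prototypes, the new type codes describe transactional
load and store entry points of roughly the shapes below. The function names are
placeholders; the real builtins are declared in gtm-builtins.def:

    #include <stddef.h>
    #include <stdint.h>

    /* BT_FN_I4_VPTR: a 4-byte transactional load.  */
    uint32_t tm_load_u4 (volatile void *addr);

    /* BT_FN_VOID_VPTR_I4: a 4-byte transactional store.  */
    void tm_store_u4 (volatile void *addr, uint32_t value);

    /* BT_FN_VOID_VPTR_SIZE: operations taking an address and a length,
       e.g. logging a memory range.  */
    void tm_log_range (volatile void *addr, size_t len);
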
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 0420b550f3b..616fca73597 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -142,6 +142,13 @@ along with GCC; see the file COPYING3. If not see
false, true, true, ATTRS, false, \
(flag_openmp || flag_tree_parallelize_loops))
+/* Builtin used by the implementation of GNU TM. These
+ functions are mapped to the actual implementation of the STM library. */
+#undef DEF_TM_BUILTIN
+#define DEF_TM_BUILTIN(ENUM, NAME, TYPE, ATTRS) \
+ DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
+ false, true, true, ATTRS, false, flag_tm)
+
/* Define an attribute list for math functions that are normally
"impure" because some of them may write into global memory for
`errno'. If !flag_errno_math they are instead "const". */
@@ -624,6 +631,7 @@ DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_L
DEF_GCC_BUILTIN (BUILT_IN_BSWAP32, "bswap32", BT_FN_UINT32_UINT32, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_BSWAP64, "bswap64", BT_FN_UINT64_UINT64, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_EXT_LIB_BUILTIN (BUILT_IN_CLEAR_CACHE, "__clear_cache", BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
+/* [trans-mem]: Adjust BUILT_IN_TM_CALLOC if BUILT_IN_CALLOC is changed. */
DEF_LIB_BUILTIN (BUILT_IN_CALLOC, "calloc", BT_FN_PTR_SIZE_SIZE, ATTR_MALLOC_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_CLASSIFY_TYPE, "classify_type", BT_FN_INT_VAR, ATTR_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_CLZ, "clz", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST)
@@ -662,6 +670,7 @@ DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSL, "ffsl", BT_FN_INT_LONG, ATTR_CONST_NOTHRO
DEF_EXT_LIB_BUILTIN (BUILT_IN_FFSLL, "ffsll", BT_FN_INT_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_EXT_LIB_BUILTIN (BUILT_IN_FORK, "fork", BT_FN_PID, ATTR_NOTHROW_LIST)
DEF_GCC_BUILTIN (BUILT_IN_FRAME_ADDRESS, "frame_address", BT_FN_PTR_UINT, ATTR_NULL)
+/* [trans-mem]: Adjust BUILT_IN_TM_FREE if BUILT_IN_FREE is changed. */
DEF_LIB_BUILTIN (BUILT_IN_FREE, "free", BT_FN_VOID_PTR, ATTR_NOTHROW_LIST)
DEF_GCC_BUILTIN (BUILT_IN_FROB_RETURN_ADDR, "frob_return_addr", BT_FN_PTR_PTR, ATTR_NULL)
DEF_EXT_LIB_BUILTIN (BUILT_IN_GETTEXT, "gettext", BT_FN_STRING_CONST_STRING, ATTR_FORMAT_ARG_1)
@@ -698,6 +707,7 @@ DEF_GCC_BUILTIN (BUILT_IN_ISUNORDERED, "isunordered", BT_FN_INT_VAR, ATTR
DEF_LIB_BUILTIN (BUILT_IN_LABS, "labs", BT_FN_LONG_LONG, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_C99_BUILTIN (BUILT_IN_LLABS, "llabs", BT_FN_LONGLONG_LONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_LONGJMP, "longjmp", BT_FN_VOID_PTR_INT, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+/* [trans-mem]: Adjust BUILT_IN_TM_MALLOC if BUILT_IN_MALLOC is changed. */
DEF_LIB_BUILTIN (BUILT_IN_MALLOC, "malloc", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_NEXT_ARG, "next_arg", BT_FN_PTR_VAR, ATTR_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_PARITY, "parity", BT_FN_INT_UINT, ATTR_CONST_NOTHROW_LEAF_LIST)
@@ -793,3 +803,6 @@ DEF_BUILTIN_STUB (BUILT_IN_EH_COPY_VALUES, "__builtin_eh_copy_values")
/* OpenMP builtins. */
#include "omp-builtins.def"
+
+/* GTM builtins. */
+#include "gtm-builtins.def"
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 04134ec444d..914c9163dc8 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,19 @@
+2011-11-07 Richard Henderson <rth@redhat.com>
+ Aldy Hernandez <aldyh@redhat.com>
+ Torvald Riegel <triegel@redhat.com>
+
+ Merged from transactional-memory.
+
+ * c-common.c (handle_tm_wrap_attribute,
+ handle_tm_attribute, ignore_attribute, parse_tm_stmt_attr): New.
+ (struct c_common_reswords): Added __transaction* keywords.
+ (struct c_common_attribute_table): Added transaction* and tm_regparm
+ attributes.
+ * c-common.h: Added RID_TRANSACTION*. Added TM_ATTR* and TM_STMT*
+ masks.
+ (parse_tm_stmt_attr, tm_attr_to_mask, tm_mask_to_attr,
+ find_tm_attribute): Declare.
+
2011-11-07 Jason Merrill <jason@redhat.com>
PR c++/35688
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 0329bc7e01c..a6823311321 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -357,6 +357,8 @@ static tree handle_returns_twice_attribute (tree *, tree, tree, int, bool *);
static tree handle_no_limit_stack_attribute (tree *, tree, tree, int,
bool *);
static tree handle_pure_attribute (tree *, tree, tree, int, bool *);
+static tree handle_tm_attribute (tree *, tree, tree, int, bool *);
+static tree handle_tm_wrap_attribute (tree *, tree, tree, int, bool *);
static tree handle_novops_attribute (tree *, tree, tree, int, bool *);
static tree handle_deprecated_attribute (tree *, tree, tree, int,
bool *);
@@ -372,6 +374,7 @@ static tree handle_type_generic_attribute (tree *, tree, tree, int, bool *);
static tree handle_alloc_size_attribute (tree *, tree, tree, int, bool *);
static tree handle_target_attribute (tree *, tree, tree, int, bool *);
static tree handle_optimize_attribute (tree *, tree, tree, int, bool *);
+static tree ignore_attribute (tree *, tree, tree, int, bool *);
static tree handle_no_split_stack_attribute (tree *, tree, tree, int, bool *);
static tree handle_fnspec_attribute (tree *, tree, tree, int, bool *);
@@ -474,6 +477,9 @@ const struct c_common_resword c_common_reswords[] =
{ "__signed", RID_SIGNED, 0 },
{ "__signed__", RID_SIGNED, 0 },
{ "__thread", RID_THREAD, 0 },
+ { "__transaction_atomic", RID_TRANSACTION_ATOMIC, 0 },
+ { "__transaction_relaxed", RID_TRANSACTION_RELAXED, 0 },
+ { "__transaction_cancel", RID_TRANSACTION_CANCEL, 0 },
{ "__typeof", RID_TYPEOF, 0 },
{ "__typeof__", RID_TYPEOF, 0 },
{ "__underlying_type", RID_UNDERLYING_TYPE, D_CXXONLY },
@@ -666,6 +672,20 @@ const struct attribute_spec c_common_attribute_table[] =
handle_no_limit_stack_attribute, false },
{ "pure", 0, 0, true, false, false,
handle_pure_attribute, false },
+ { "transaction_callable", 0, 0, false, true, false,
+ handle_tm_attribute, false },
+ { "transaction_unsafe", 0, 0, false, true, false,
+ handle_tm_attribute, false },
+ { "transaction_safe", 0, 0, false, true, false,
+ handle_tm_attribute, false },
+ { "transaction_may_cancel_outer", 0, 0, false, true, false,
+ handle_tm_attribute, false },
+ /* ??? These two attributes didn't make the transition from the
+ Intel language document to the multi-vendor language document. */
+ { "transaction_pure", 0, 0, false, true, false,
+ handle_tm_attribute, false },
+ { "transaction_wrap", 1, 1, true, false, false,
+ handle_tm_wrap_attribute, false },
/* For internal use (marking of builtins) only. The name contains space
to prevent its usage in source code. */
{ "no vops", 0, 0, true, false, false,
@@ -707,6 +727,10 @@ const struct attribute_spec c_common_attribute_table[] =
handle_target_attribute, false },
{ "optimize", 1, -1, true, false, false,
handle_optimize_attribute, false },
+ /* For internal use only. The leading '*' both prevents its usage in
+ source code and signals that it may be overridden by machine tables. */
+ { "*tm regparm", 0, 0, false, true, true,
+ ignore_attribute, false },
{ "no_split_stack", 0, 0, true, false, false,
handle_no_split_stack_attribute, false },
/* For internal use (marking of builtins and runtime functions) only.
@@ -7315,6 +7339,223 @@ handle_pure_attribute (tree *node, tree name, tree ARG_UNUSED (args),
return NULL_TREE;
}
+/* Digest an attribute list destined for a transactional memory statement.
+ ALLOWED is the set of attributes that are allowed for this statement;
+ return the attribute we parsed. Multiple attributes are never allowed. */
+
+int
+parse_tm_stmt_attr (tree attrs, int allowed)
+{
+ tree a_seen = NULL;
+ int m_seen = 0;
+
+ for ( ; attrs ; attrs = TREE_CHAIN (attrs))
+ {
+ tree a = TREE_PURPOSE (attrs);
+ int m = 0;
+
+ if (is_attribute_p ("outer", a))
+ m = TM_STMT_ATTR_OUTER;
+
+ if ((m & allowed) == 0)
+ {
+ warning (OPT_Wattributes, "%qE attribute directive ignored", a);
+ continue;
+ }
+
+ if (m_seen == 0)
+ {
+ a_seen = a;
+ m_seen = m;
+ }
+ else if (m_seen == m)
+ warning (OPT_Wattributes, "%qE attribute duplicated", a);
+ else
+ warning (OPT_Wattributes, "%qE attribute follows %qE", a, a_seen);
+ }
+
+ return m_seen;
+}
+
+/* Transform a TM attribute name into a maskable integer and back.
+ Note that NULL (i.e. no attribute) is mapped to UNKNOWN, corresponding
+ to how the lack of an attribute is treated. */
+
+int
+tm_attr_to_mask (tree attr)
+{
+ if (attr == NULL)
+ return 0;
+ if (is_attribute_p ("transaction_safe", attr))
+ return TM_ATTR_SAFE;
+ if (is_attribute_p ("transaction_callable", attr))
+ return TM_ATTR_CALLABLE;
+ if (is_attribute_p ("transaction_pure", attr))
+ return TM_ATTR_PURE;
+ if (is_attribute_p ("transaction_unsafe", attr))
+ return TM_ATTR_IRREVOCABLE;
+ if (is_attribute_p ("transaction_may_cancel_outer", attr))
+ return TM_ATTR_MAY_CANCEL_OUTER;
+ return 0;
+}
+
+tree
+tm_mask_to_attr (int mask)
+{
+ const char *str;
+ switch (mask)
+ {
+ case TM_ATTR_SAFE:
+ str = "transaction_safe";
+ break;
+ case TM_ATTR_CALLABLE:
+ str = "transaction_callable";
+ break;
+ case TM_ATTR_PURE:
+ str = "transaction_pure";
+ break;
+ case TM_ATTR_IRREVOCABLE:
+ str = "transaction_unsafe";
+ break;
+ case TM_ATTR_MAY_CANCEL_OUTER:
+ str = "transaction_may_cancel_outer";
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return get_identifier (str);
+}
+
+/* Return the first TM attribute seen in LIST. */
+
+tree
+find_tm_attribute (tree list)
+{
+ for (; list ; list = TREE_CHAIN (list))
+ {
+ tree name = TREE_PURPOSE (list);
+ if (tm_attr_to_mask (name) != 0)
+ return name;
+ }
+ return NULL_TREE;
+}
+
+/* Handle the TM attributes; arguments as in struct attribute_spec.handler.
+ Here we accept only function types, and verify that none of the other
+ function TM attributes are also applied. */
+/* ??? We need to accept class types for C++, but not C. This greatly
+ complicates this function, since we can no longer rely on the extra
+ processing given by function_type_required. */
+
+static tree
+handle_tm_attribute (tree *node, tree name, tree args,
+ int flags, bool *no_add_attrs)
+{
+ /* Only one path adds the attribute; others don't. */
+ *no_add_attrs = true;
+
+ switch (TREE_CODE (*node))
+ {
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ /* Only tm_callable and tm_safe apply to classes. */
+ if (tm_attr_to_mask (name) & ~(TM_ATTR_SAFE | TM_ATTR_CALLABLE))
+ goto ignored;
+ /* FALLTHRU */
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ {
+ tree old_name = find_tm_attribute (TYPE_ATTRIBUTES (*node));
+ if (old_name == name)
+ ;
+ else if (old_name != NULL_TREE)
+ error ("type was previously declared %qE", old_name);
+ else
+ *no_add_attrs = false;
+ }
+ break;
+
+ case POINTER_TYPE:
+ {
+ enum tree_code subcode = TREE_CODE (TREE_TYPE (*node));
+ if (subcode == FUNCTION_TYPE || subcode == METHOD_TYPE)
+ {
+ tree fn_tmp = TREE_TYPE (*node);
+ decl_attributes (&fn_tmp, tree_cons (name, args, NULL), 0);
+ *node = build_pointer_type (fn_tmp);
+ break;
+ }
+ }
+ /* FALLTHRU */
+
+ default:
+ /* If a function is next, pass it on to be tried next. */
+ if (flags & (int) ATTR_FLAG_FUNCTION_NEXT)
+ return tree_cons (name, args, NULL);
+
+ ignored:
+ warning (OPT_Wattributes, "%qE attribute ignored", name);
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle the TM_WRAP attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_tm_wrap_attribute (tree *node, tree name, tree args,
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ tree decl = *node;
+
+ /* We don't need the attribute even on success, since we
+ record the entry in an external table. */
+ *no_add_attrs = true;
+
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ warning (OPT_Wattributes, "%qE attribute ignored", name);
+ else
+ {
+ tree wrap_decl = TREE_VALUE (args);
+ if (TREE_CODE (wrap_decl) != IDENTIFIER_NODE
+ && TREE_CODE (wrap_decl) != VAR_DECL
+ && TREE_CODE (wrap_decl) != FUNCTION_DECL)
+ error ("%qE argument not an identifier", name);
+ else
+ {
+ if (TREE_CODE (wrap_decl) == IDENTIFIER_NODE)
+ wrap_decl = lookup_name (wrap_decl);
+ if (wrap_decl && TREE_CODE (wrap_decl) == FUNCTION_DECL)
+ {
+ if (lang_hooks.types_compatible_p (TREE_TYPE (decl),
+ TREE_TYPE (wrap_decl)))
+ record_tm_replacement (wrap_decl, decl);
+ else
+ error ("%qD is not compatible with %qD", wrap_decl, decl);
+ }
+ else
+ error ("transaction_wrap argument is not a function");
+ }
+ }
+
+ return NULL_TREE;
+}
+
+/* Ignore the given attribute. Used when this attribute may be usefully
+ overridden by the target, but is not used generically. */
+
+static tree
+ignore_attribute (tree * ARG_UNUSED (node), tree ARG_UNUSED (name),
+ tree ARG_UNUSED (args), int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ *no_add_attrs = true;
+ return NULL_TREE;
+}
+
/* Handle a "no vops" attribute; arguments as in
struct attribute_spec.handler. */
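
Taken together, the handlers above accept declarations of the following shape;
all function names are illustrative. Reading handle_tm_wrap_attribute, the
transaction_wrap attribute goes on the replacement function and names the
function it stands in for inside transactions:

    /* May be called directly from transactional code.  */
    extern int lookup (int key) __attribute__ ((transaction_safe));

    /* Must never be called inside a transaction.  */
    extern void do_io (const char *msg) __attribute__ ((transaction_unsafe));

    /* Inside transactions, calls to log_event are redirected to
       log_event_tm; the two declarations must have compatible types or
       handle_tm_wrap_attribute reports an error.  */
    extern void log_event (int code);
    extern void log_event_tm (int code)
      __attribute__ ((transaction_wrap (log_event)));
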
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index bff6956cc14..4d65dd1b7f1 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -113,6 +113,9 @@ enum rid
as a normal identifier. */
RID_CXX_COMPAT_WARN,
+ /* GNU transactional memory extension */
+ RID_TRANSACTION_ATOMIC, RID_TRANSACTION_RELAXED, RID_TRANSACTION_CANCEL,
+
/* Too many ways of getting the name of a function as a string */
RID_FUNCTION_NAME, RID_PRETTY_FUNCTION_NAME, RID_C99_FUNCTION_NAME,
@@ -1073,6 +1076,28 @@ c_tree_chain_next (tree t)
return NULL;
}
+/* Mask used by tm_stmt_attr. */
+#define TM_STMT_ATTR_OUTER 2
+#define TM_STMT_ATTR_ATOMIC 4
+#define TM_STMT_ATTR_RELAXED 8
+
+extern int parse_tm_stmt_attr (tree, int);
+
+/* Mask used by tm_attr_to_mask and tm_mask_to_attr. Note that these
+ are ordered specifically such that more restrictive attributes are
+ at lower bit positions. This fact is known by the C++ tm attribute
+ inheritance code such that least bit extraction (mask & -mask) results
+ in the most restrictive attribute. */
+#define TM_ATTR_SAFE 1
+#define TM_ATTR_CALLABLE 2
+#define TM_ATTR_PURE 4
+#define TM_ATTR_IRREVOCABLE 8
+#define TM_ATTR_MAY_CANCEL_OUTER 16
+
+extern int tm_attr_to_mask (tree);
+extern tree tm_mask_to_attr (int);
+extern tree find_tm_attribute (tree);
+
/* A suffix-identifier value doublet that represents user-defined literals
for C++-0x. */
struct GTY(()) tree_userdef_literal {
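
The "least bit extraction" mentioned above is easy to demonstrate: with the mask
values defined here, mask & -mask isolates the lowest set bit, i.e. the most
restrictive attribute present in a combined mask. A self-contained check:

    #include <stdio.h>

    #define TM_ATTR_SAFE              1
    #define TM_ATTR_CALLABLE          2
    #define TM_ATTR_PURE              4
    #define TM_ATTR_IRREVOCABLE       8
    #define TM_ATTR_MAY_CANCEL_OUTER 16

    int
    main (void)
    {
      /* transaction_safe combined with transaction_callable resolves to
         the more restrictive transaction_safe (bit 1).  */
      int mask = TM_ATTR_SAFE | TM_ATTR_CALLABLE;
      printf ("%d\n", mask & -mask);   /* prints 1, i.e. TM_ATTR_SAFE  */
      return 0;
    }
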
diff --git a/gcc/c-parser.c b/gcc/c-parser.c
index 58bcb0282a1..aed390f31e8 100644
--- a/gcc/c-parser.c
+++ b/gcc/c-parser.c
@@ -195,6 +195,9 @@ typedef struct GTY(()) c_parser {
undesirable to bind an identifier to an Objective-C class, even
if a class with that name exists. */
BOOL_BITFIELD objc_need_raw_identifier : 1;
+ /* Nonzero if we're processing a __transaction statement. The value
+ is 1 | TM_STMT_ATTR_*. */
+ unsigned int in_transaction : 4;
/* True if we are in a context where the Objective-C "Property attribute"
keywords are valid. */
BOOL_BITFIELD objc_property_attr_context : 1;
@@ -1171,6 +1174,9 @@ static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
location_t loc,
struct c_expr);
+static tree c_parser_transaction (c_parser *, enum rid);
+static struct c_expr c_parser_transaction_expression (c_parser *, enum rid);
+static tree c_parser_transaction_cancel (c_parser *);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static VEC(tree,gc) *c_parser_expr_list (c_parser *, bool, bool,
@@ -3413,6 +3419,66 @@ c_parser_simple_asm_expr (c_parser *parser)
return str;
}
+static tree
+c_parser_attribute_any_word (c_parser *parser)
+{
+ tree attr_name = NULL_TREE;
+
+ if (c_parser_next_token_is (parser, CPP_KEYWORD))
+ {
+ /* ??? See comment above about what keywords are accepted here. */
+ bool ok;
+ switch (c_parser_peek_token (parser)->keyword)
+ {
+ case RID_STATIC:
+ case RID_UNSIGNED:
+ case RID_LONG:
+ case RID_INT128:
+ case RID_CONST:
+ case RID_EXTERN:
+ case RID_REGISTER:
+ case RID_TYPEDEF:
+ case RID_SHORT:
+ case RID_INLINE:
+ case RID_NORETURN:
+ case RID_VOLATILE:
+ case RID_SIGNED:
+ case RID_AUTO:
+ case RID_RESTRICT:
+ case RID_COMPLEX:
+ case RID_THREAD:
+ case RID_INT:
+ case RID_CHAR:
+ case RID_FLOAT:
+ case RID_DOUBLE:
+ case RID_VOID:
+ case RID_DFLOAT32:
+ case RID_DFLOAT64:
+ case RID_DFLOAT128:
+ case RID_BOOL:
+ case RID_FRACT:
+ case RID_ACCUM:
+ case RID_SAT:
+ case RID_TRANSACTION_ATOMIC:
+ case RID_TRANSACTION_CANCEL:
+ ok = true;
+ break;
+ default:
+ ok = false;
+ break;
+ }
+ if (!ok)
+ return NULL_TREE;
+
+ /* Accept __attribute__((__const)) as __attribute__((const)) etc. */
+ attr_name = ridpointers[(int) c_parser_peek_token (parser)->keyword];
+ }
+ else if (c_parser_next_token_is (parser, CPP_NAME))
+ attr_name = c_parser_peek_token (parser)->value;
+
+ return attr_name;
+}
+
/* Parse (possibly empty) attributes. This is a GNU extension.
attributes:
@@ -3473,57 +3539,10 @@ c_parser_attributes (c_parser *parser)
c_parser_consume_token (parser);
continue;
}
- if (c_parser_next_token_is (parser, CPP_KEYWORD))
- {
- /* ??? See comment above about what keywords are
- accepted here. */
- bool ok;
- switch (c_parser_peek_token (parser)->keyword)
- {
- case RID_STATIC:
- case RID_UNSIGNED:
- case RID_LONG:
- case RID_INT128:
- case RID_CONST:
- case RID_EXTERN:
- case RID_REGISTER:
- case RID_TYPEDEF:
- case RID_SHORT:
- case RID_INLINE:
- case RID_NORETURN:
- case RID_VOLATILE:
- case RID_SIGNED:
- case RID_AUTO:
- case RID_RESTRICT:
- case RID_COMPLEX:
- case RID_THREAD:
- case RID_INT:
- case RID_CHAR:
- case RID_FLOAT:
- case RID_DOUBLE:
- case RID_VOID:
- case RID_DFLOAT32:
- case RID_DFLOAT64:
- case RID_DFLOAT128:
- case RID_BOOL:
- case RID_FRACT:
- case RID_ACCUM:
- case RID_SAT:
- ok = true;
- break;
- default:
- ok = false;
- break;
- }
- if (!ok)
- break;
- /* Accept __attribute__((__const)) as __attribute__((const))
- etc. */
- attr_name
- = ridpointers[(int) c_parser_peek_token (parser)->keyword];
- }
- else
- attr_name = c_parser_peek_token (parser)->value;
+
+ attr_name = c_parser_attribute_any_word (parser);
+ if (attr_name == NULL)
+ break;
c_parser_consume_token (parser);
if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
{
@@ -4394,7 +4413,14 @@ c_parser_label (c_parser *parser)
atomic-directive expression-statement
ordered-construct:
- ordered-directive structured-block */
+ ordered-directive structured-block
+
+ Transactional Memory:
+
+ statement:
+ transaction-statement
+ transaction-cancel-statement
+*/
static void
c_parser_statement (c_parser *parser)
@@ -4485,6 +4511,14 @@ c_parser_statement_after_labels (c_parser *parser)
case RID_ASM:
stmt = c_parser_asm_statement (parser);
break;
+ case RID_TRANSACTION_ATOMIC:
+ case RID_TRANSACTION_RELAXED:
+ stmt = c_parser_transaction (parser,
+ c_parser_peek_token (parser)->keyword);
+ break;
+ case RID_TRANSACTION_CANCEL:
+ stmt = c_parser_transaction_cancel (parser);
+ goto expect_semicolon;
case RID_AT_THROW:
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
@@ -5812,6 +5846,11 @@ c_parser_cast_expression (c_parser *parser, struct c_expr *after)
unary-operator: one of
__extension__ __real__ __imag__
+ Transactional Memory:
+
+ unary-expression:
+ transaction-expression
+
In addition, the GNU syntax treats ++ and -- as unary operators, so
they may be applied to cast expressions with errors for non-lvalues
given later. */
@@ -5919,6 +5958,10 @@ c_parser_unary_expression (c_parser *parser)
op = c_parser_cast_expression (parser, NULL);
op = default_function_array_conversion (exp_loc, op);
return parser_build_unary_op (op_loc, IMAGPART_EXPR, op);
+ case RID_TRANSACTION_ATOMIC:
+ case RID_TRANSACTION_RELAXED:
+ return c_parser_transaction_expression (parser,
+ c_parser_peek_token (parser)->keyword);
default:
return c_parser_postfix_expression (parser);
}
@@ -10535,6 +10578,212 @@ c_parser_omp_threadprivate (c_parser *parser)
c_parser_skip_to_pragma_eol (parser);
}
+/* Parse a transaction attribute (GCC Extension).
+
+ transaction-attribute:
+ attributes
+ [ [ any-word ] ]
+
+ The transactional memory language description is written for C++,
+ and uses the C++0x attribute syntax. For compatibility, allow the
+ bracket style for transactions in C as well. */
+
+static tree
+c_parser_transaction_attributes (c_parser *parser)
+{
+ tree attr_name, attr = NULL;
+
+ if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
+ return c_parser_attributes (parser);
+
+ if (!c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
+ return NULL_TREE;
+ c_parser_consume_token (parser);
+ if (!c_parser_require (parser, CPP_OPEN_SQUARE, "expected %<[%>"))
+ goto error1;
+
+ attr_name = c_parser_attribute_any_word (parser);
+ if (attr_name)
+ {
+ c_parser_consume_token (parser);
+ attr = build_tree_list (attr_name, NULL_TREE);
+ }
+ else
+ c_parser_error (parser, "expected identifier");
+
+ c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
+ error1:
+ c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
+ return attr;
+}
+
+/* Parse a __transaction_atomic or __transaction_relaxed statement
+ (GCC Extension).
+
+ transaction-statement:
+ __transaction_atomic transaction-attribute[opt] compound-statement
+ __transaction_relaxed compound-statement
+
+ Note that the only valid attribute is: "outer".
+*/
+
+static tree
+c_parser_transaction (c_parser *parser, enum rid keyword)
+{
+ unsigned int old_in = parser->in_transaction;
+ unsigned int this_in = 1, new_in;
+ location_t loc = c_parser_peek_token (parser)->location;
+ tree stmt, attrs;
+
+ gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
+ || keyword == RID_TRANSACTION_RELAXED)
+ && c_parser_next_token_is_keyword (parser, keyword));
+ c_parser_consume_token (parser);
+
+ if (keyword == RID_TRANSACTION_RELAXED)
+ this_in |= TM_STMT_ATTR_RELAXED;
+ else
+ {
+ attrs = c_parser_transaction_attributes (parser);
+ if (attrs)
+ this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
+ }
+
+ /* Keep track if we're in the lexical scope of an outer transaction. */
+ new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);
+
+ parser->in_transaction = new_in;
+ stmt = c_parser_compound_statement (parser);
+ parser->in_transaction = old_in;
+
+ if (flag_tm)
+ stmt = c_finish_transaction (loc, stmt, this_in);
+ else
+ error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
+ "%<__transaction_atomic%> without transactional memory support enabled"
+ : "%<__transaction_relaxed %> "
+ "without transactional memory support enabled"));
+
+ return stmt;
+}
+
+/* Parse a __transaction_atomic or __transaction_relaxed expression
+ (GCC Extension).
+
+ transaction-expression:
+ __transaction_atomic ( expression )
+ __transaction_relaxed ( expression )
+*/
+
+static struct c_expr
+c_parser_transaction_expression (c_parser *parser, enum rid keyword)
+{
+ struct c_expr ret;
+ unsigned int old_in = parser->in_transaction;
+ unsigned int this_in = 1;
+ location_t loc = c_parser_peek_token (parser)->location;
+ tree attrs;
+
+ gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
+ || keyword == RID_TRANSACTION_RELAXED)
+ && c_parser_next_token_is_keyword (parser, keyword));
+ c_parser_consume_token (parser);
+
+ if (keyword == RID_TRANSACTION_RELAXED)
+ this_in |= TM_STMT_ATTR_RELAXED;
+ else
+ {
+ attrs = c_parser_transaction_attributes (parser);
+ if (attrs)
+ this_in |= parse_tm_stmt_attr (attrs, 0);
+ }
+
+ parser->in_transaction = this_in;
+ if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
+ {
+ tree expr = c_parser_expression (parser).value;
+ ret.original_type = TREE_TYPE (expr);
+ ret.value = build1 (TRANSACTION_EXPR, ret.original_type, expr);
+ if (this_in & TM_STMT_ATTR_RELAXED)
+ TRANSACTION_EXPR_RELAXED (ret.value) = 1;
+ SET_EXPR_LOCATION (ret.value, loc);
+ ret.original_code = TRANSACTION_EXPR;
+ }
+ else
+ {
+ c_parser_error (parser, "expected %<(%>");
+ ret.value = error_mark_node;
+ ret.original_code = ERROR_MARK;
+ ret.original_type = NULL;
+ }
+ parser->in_transaction = old_in;
+
+ if (!flag_tm)
+ error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
+ "%<__transaction_atomic%> without transactional memory support enabled"
+ : "%<__transaction_relaxed %> "
+ "without transactional memory support enabled"));
+
+ return ret;
+}
+
+/* Parse a __transaction_cancel statement (GCC Extension).
+
+ transaction-cancel-statement:
+ __transaction_cancel transaction-attribute[opt] ;
+
+ Note that the only valid attribute is "outer".
+*/
+
+static tree
+c_parser_transaction_cancel(c_parser *parser)
+{
+ location_t loc = c_parser_peek_token (parser)->location;
+ tree attrs;
+ bool is_outer = false;
+
+ gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRANSACTION_CANCEL));
+ c_parser_consume_token (parser);
+
+ attrs = c_parser_transaction_attributes (parser);
+ if (attrs)
+ is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);
+
+ if (!flag_tm)
+ {
+ error_at (loc, "%<__transaction_cancel%> without "
+ "transactional memory support enabled");
+ goto ret_error;
+ }
+ else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
+ {
+ error_at (loc, "%<__transaction_cancel%> within a "
+ "%<__transaction_relaxed%>");
+ goto ret_error;
+ }
+ else if (is_outer)
+ {
+ if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
+ && !is_tm_may_cancel_outer (current_function_decl))
+ {
+ error_at (loc, "outer %<__transaction_cancel%> not "
+ "within outer %<__transaction_atomic%>");
+ error_at (loc, " or a %<transaction_may_cancel_outer%> function");
+ goto ret_error;
+ }
+ }
+ else if (parser->in_transaction == 0)
+ {
+ error_at (loc, "%<__transaction_cancel%> not within "
+ "%<__transaction_atomic%>");
+ goto ret_error;
+ }
+
+ return add_stmt (build_tm_abort_call (loc, is_outer));
+
+ ret_error:
+ return build1 (NOP_EXPR, void_type_node, error_mark_node);
+}
/* Parse a single source file. */
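
Putting the three grammar fragments above together, the C parser now accepts
code of the following shape; the variable and function names are illustrative:

    extern int balance;

    int
    withdraw (int amount)
    {
      int ok = 0;

      /* transaction-statement; "outer" is the only valid attribute.  */
      __transaction_atomic [[outer]]
      {
        if (balance < amount)
          /* transaction-cancel-statement; with "outer" it cancels the
             enclosing outer transaction.  */
          __transaction_cancel [[outer]];
        balance -= amount;
        ok = 1;
      }
      return ok;
    }

    /* transaction-expression form, handled by
       c_parser_transaction_expression.  */
    int
    read_balance (void)
    {
      return __transaction_atomic (balance);
    }
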
diff --git a/gcc/c-tree.h b/gcc/c-tree.h
index 6801db2b98b..51c660c0346 100644
--- a/gcc/c-tree.h
+++ b/gcc/c-tree.h
@@ -603,6 +603,7 @@ extern tree c_begin_omp_task (void);
extern tree c_finish_omp_task (location_t, tree, tree);
extern tree c_finish_omp_clauses (tree);
extern tree c_build_va_arg (location_t, tree, tree);
+extern tree c_finish_transaction (location_t, tree, int);
extern tree c_build_vec_perm_expr (location_t, tree, tree, tree);
/* Set to 0 at beginning of a function definition, set to 1 if
diff --git a/gcc/c-typeck.c b/gcc/c-typeck.c
index 392ac652c0a..4a134b0e524 100644
--- a/gcc/c-typeck.c
+++ b/gcc/c-typeck.c
@@ -2716,6 +2716,9 @@ build_function_call_vec (location_t loc, tree function, VEC(tree,gc) *params,
return tem;
name = DECL_NAME (function);
+
+ if (flag_tm)
+ tm_malloc_replacement (function);
fundecl = function;
/* Atomic functions have type checking/casting already done. They are
often rewritten and don't match the original parameter list. */
@@ -10922,6 +10925,19 @@ c_finish_omp_clauses (tree clauses)
return clauses;
}
+/* Create a transaction node. */
+
+tree
+c_finish_transaction (location_t loc, tree block, int flags)
+{
+ tree stmt = build_stmt (loc, TRANSACTION_EXPR, block);
+ if (flags & TM_STMT_ATTR_OUTER)
+ TRANSACTION_EXPR_OUTER (stmt) = 1;
+ if (flags & TM_STMT_ATTR_RELAXED)
+ TRANSACTION_EXPR_RELAXED (stmt) = 1;
+ return add_stmt (stmt);
+}
+
/* Make a variant type in the proper way for C/C++, propagating qualifiers
down to the element type of an array. */
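
The tm_malloc_replacement call wired into build_function_call_vec above is what
makes ordinary allocation calls usable inside transactions; per the
"[trans-mem]: Adjust BUILT_IN_TM_MALLOC ..." notes in builtins.def, the call is
presumably retargeted to a transactional variant of malloc. An illustrative use
(the function name is made up):

    #include <stdlib.h>

    int *
    make_node (int value)
    {
      int *p;
      __transaction_atomic
      {
        /* With -fgnu-tm, tm_malloc_replacement lets this call be treated
           as the transactional allocator rather than plain malloc.  */
        p = malloc (sizeof *p);
        if (p)
          *p = value;
      }
      return p;
    }
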
diff --git a/gcc/calls.c b/gcc/calls.c
index eeb6b2ec602..382de7fcdeb 100644
--- a/gcc/calls.c
+++ b/gcc/calls.c
@@ -611,6 +611,69 @@ alloca_call_p (const_tree exp)
return false;
}
+/* Return TRUE if FNDECL is either a TM builtin or a TM cloned
+ function. Return FALSE otherwise. */
+
+static bool
+is_tm_builtin (const_tree fndecl)
+{
+ if (fndecl == NULL)
+ return false;
+
+ if (decl_is_tm_clone (fndecl))
+ return true;
+
+ if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ {
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_TM_COMMIT:
+ case BUILT_IN_TM_COMMIT_EH:
+ case BUILT_IN_TM_ABORT:
+ case BUILT_IN_TM_IRREVOCABLE:
+ case BUILT_IN_TM_GETTMCLONE_IRR:
+ case BUILT_IN_TM_MEMCPY:
+ case BUILT_IN_TM_MEMMOVE:
+ case BUILT_IN_TM_MEMSET:
+ CASE_BUILT_IN_TM_STORE (1):
+ CASE_BUILT_IN_TM_STORE (2):
+ CASE_BUILT_IN_TM_STORE (4):
+ CASE_BUILT_IN_TM_STORE (8):
+ CASE_BUILT_IN_TM_STORE (FLOAT):
+ CASE_BUILT_IN_TM_STORE (DOUBLE):
+ CASE_BUILT_IN_TM_STORE (LDOUBLE):
+ CASE_BUILT_IN_TM_STORE (M64):
+ CASE_BUILT_IN_TM_STORE (M128):
+ CASE_BUILT_IN_TM_STORE (M256):
+ CASE_BUILT_IN_TM_LOAD (1):
+ CASE_BUILT_IN_TM_LOAD (2):
+ CASE_BUILT_IN_TM_LOAD (4):
+ CASE_BUILT_IN_TM_LOAD (8):
+ CASE_BUILT_IN_TM_LOAD (FLOAT):
+ CASE_BUILT_IN_TM_LOAD (DOUBLE):
+ CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+ CASE_BUILT_IN_TM_LOAD (M64):
+ CASE_BUILT_IN_TM_LOAD (M128):
+ CASE_BUILT_IN_TM_LOAD (M256):
+ case BUILT_IN_TM_LOG:
+ case BUILT_IN_TM_LOG_1:
+ case BUILT_IN_TM_LOG_2:
+ case BUILT_IN_TM_LOG_4:
+ case BUILT_IN_TM_LOG_8:
+ case BUILT_IN_TM_LOG_FLOAT:
+ case BUILT_IN_TM_LOG_DOUBLE:
+ case BUILT_IN_TM_LOG_LDOUBLE:
+ case BUILT_IN_TM_LOG_M64:
+ case BUILT_IN_TM_LOG_M128:
+ case BUILT_IN_TM_LOG_M256:
+ return true;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
/* Detect flags (function attributes) from the function decl or type node. */
int
@@ -644,10 +707,28 @@ flags_from_decl_or_type (const_tree exp)
if (TREE_NOTHROW (exp))
flags |= ECF_NOTHROW;
+ if (flag_tm)
+ {
+ if (is_tm_builtin (exp))
+ flags |= ECF_TM_BUILTIN;
+ else if ((flags & ECF_CONST) != 0
+ || lookup_attribute ("transaction_pure",
+ TYPE_ATTRIBUTES (TREE_TYPE (exp))))
+ flags |= ECF_TM_PURE;
+ }
+
flags = special_function_p (exp, flags);
}
- else if (TYPE_P (exp) && TYPE_READONLY (exp))
- flags |= ECF_CONST;
+ else if (TYPE_P (exp))
+ {
+ if (TYPE_READONLY (exp))
+ flags |= ECF_CONST;
+
+ if (flag_tm
+ && ((flags & ECF_CONST) != 0
+ || lookup_attribute ("transaction_pure", TYPE_ATTRIBUTES (exp))))
+ flags |= ECF_TM_PURE;
+ }
if (TREE_THIS_VOLATILE (exp))
{
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index 6f0d69e4523..692fea8a17b 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -338,18 +338,30 @@ make_edges (basic_block min, basic_block max, int update_p)
/* Add any appropriate EH edges. */
rtl_make_eh_edge (edge_cache, bb, insn);
- if (code == CALL_INSN && nonlocal_goto_handler_labels)
+ if (code == CALL_INSN)
{
- /* ??? This could be made smarter: in some cases it's possible
- to tell that certain calls will not do a nonlocal goto.
- For example, if the nested functions that do the nonlocal
- gotos do not have their addresses taken, then only calls to
- those functions or to other nested functions that use them
- could possibly do nonlocal gotos. */
if (can_nonlocal_goto (insn))
- for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1))
- make_label_edge (edge_cache, bb, XEXP (x, 0),
- EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
+ {
+ /* ??? This could be made smarter: in some cases it's
+ possible to tell that certain calls will not do a
+ nonlocal goto. For example, if the nested functions
+ that do the nonlocal gotos do not have their addresses
+ taken, then only calls to those functions or to other
+ nested functions that use them could possibly do
+ nonlocal gotos. */
+ for (x = nonlocal_goto_handler_labels; x; x = XEXP (x, 1))
+ make_label_edge (edge_cache, bb, XEXP (x, 0),
+ EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
+ }
+
+ if (flag_tm)
+ {
+ rtx note;
+ for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
+ if (REG_NOTE_KIND (note) == REG_TM)
+ make_label_edge (edge_cache, bb, XEXP (note, 0),
+ EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
+ }
}
}
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 6fb9ee0c5a1..3d733337cff 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -1802,6 +1802,38 @@ expand_gimple_cond (basic_block bb, gimple stmt)
return new_bb;
}
+/* Mark all calls that can have a transaction restart. */
+
+static void
+mark_transaction_restart_calls (gimple stmt)
+{
+ struct tm_restart_node dummy;
+ void **slot;
+
+ if (!cfun->gimple_df->tm_restart)
+ return;
+
+ dummy.stmt = stmt;
+ slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, NO_INSERT);
+ if (slot)
+ {
+ struct tm_restart_node *n = (struct tm_restart_node *) *slot;
+ tree list = n->label_or_list;
+ rtx insn;
+
+ for (insn = next_real_insn (get_last_insn ());
+ !CALL_P (insn);
+ insn = next_real_insn (insn))
+ continue;
+
+ if (TREE_CODE (list) == LABEL_DECL)
+ add_reg_note (insn, REG_TM, label_rtx (list));
+ else
+ for (; list ; list = TREE_CHAIN (list))
+ add_reg_note (insn, REG_TM, label_rtx (TREE_VALUE (list)));
+ }
+}
+
/* A subroutine of expand_gimple_stmt_1, expanding one GIMPLE_CALL
statement STMT. */
@@ -1888,6 +1920,8 @@ expand_call_stmt (gimple stmt)
expand_assignment (lhs, exp, false);
else
expand_expr_real_1 (exp, const0_rtx, VOIDmode, EXPAND_NORMAL, NULL);
+
+ mark_transaction_restart_calls (stmt);
}
/* A subroutine of expand_gimple_stmt, expanding one gimple statement
@@ -4455,6 +4489,14 @@ gimple_expand_cfg (void)
/* After expanding, the return labels are no longer needed. */
return_label = NULL;
naked_return_label = NULL;
+
+ /* After expanding, the tm_restart map is no longer needed. */
+ if (cfun->gimple_df->tm_restart)
+ {
+ htab_delete (cfun->gimple_df->tm_restart);
+ cfun->gimple_df->tm_restart = NULL;
+ }
+
/* Tag the blocks with a depth number so that change_scope can find
the common parent easily. */
set_block_levels (DECL_INITIAL (cfun->decl), 0);
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index f06dbc83b1d..6e9f70ed719 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -2246,6 +2246,8 @@ purge_dead_edges (basic_block bb)
;
else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
;
+ else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
+ ;
else
remove = true;
}
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index f056d3db58e..2d226d49939 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -1840,6 +1840,8 @@ dump_cgraph_node (FILE *f, struct cgraph_node *node)
fprintf (f, " only_called_at_exit");
else if (node->alias)
fprintf (f, " alias");
+ if (node->tm_clone)
+ fprintf (f, " tm_clone");
fprintf (f, "\n");
diff --git a/gcc/cgraph.h b/gcc/cgraph.h
index 294fb772a5b..9e98ce9e2ef 100644
--- a/gcc/cgraph.h
+++ b/gcc/cgraph.h
@@ -98,6 +98,9 @@ struct GTY(()) cgraph_local_info {
/* True when the function has been originally extern inline, but it is
redefined now. */
unsigned redefined_extern_inline : 1;
+
+ /* True if the function may enter serial irrevocable mode. */
+ unsigned tm_may_enter_irr : 1;
};
/* Information about the function that needs to be computed globally
@@ -245,6 +248,11 @@ struct GTY((chain_next ("%h.next"), chain_prev ("%h.previous"))) cgraph_node {
unsigned only_called_at_startup : 1;
/* True when function can only be called at startup (from static dtor). */
unsigned only_called_at_exit : 1;
+  /* True when the function is a transactional clone, i.e. a copy of a
+     function that is called only from inside transactions.  */
+ /* ?? We should be able to remove this. We have enough bits in
+ cgraph to calculate it. */
+ unsigned tm_clone : 1;
};
typedef struct cgraph_node *cgraph_node_ptr;
@@ -565,6 +573,8 @@ void verify_cgraph_node (struct cgraph_node *);
void cgraph_build_static_cdtor (char which, tree body, int priority);
void cgraph_reset_static_var_maps (void);
void init_cgraph (void);
+struct cgraph_node * cgraph_copy_node_for_versioning (struct cgraph_node *,
+ tree, VEC(cgraph_edge_p,heap)*, bitmap);
struct cgraph_node *cgraph_function_versioning (struct cgraph_node *,
VEC(cgraph_edge_p,heap)*,
VEC(ipa_replace_map_p,gc)*,
@@ -1082,4 +1092,14 @@ cgraph_edge_recursive_p (struct cgraph_edge *e)
else
return e->caller->decl == callee->decl;
}
+
+/* Return true if the TM_CLONE bit is set for a given FNDECL. */
+static inline bool
+decl_is_tm_clone (const_tree fndecl)
+{
+ struct cgraph_node *n = cgraph_get_node (fndecl);
+ if (n)
+ return n->tm_clone;
+ return false;
+}
#endif /* GCC_CGRAPH_H */
diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c
index 83c47ab66fd..e401b8f2f2b 100644
--- a/gcc/cgraphunit.c
+++ b/gcc/cgraphunit.c
@@ -2272,7 +2272,7 @@ update_call_expr (struct cgraph_node *new_version)
was copied to prevent duplications of calls that are dead
in the clone. */
-static struct cgraph_node *
+struct cgraph_node *
cgraph_copy_node_for_versioning (struct cgraph_node *old_version,
tree new_decl,
VEC(cgraph_edge_p,heap) *redirect_callers,
@@ -2286,7 +2286,7 @@ cgraph_copy_node_for_versioning (struct cgraph_node *old_version,
new_version = cgraph_create_node (new_decl);
- new_version->analyzed = true;
+ new_version->analyzed = old_version->analyzed;
new_version->local = old_version->local;
new_version->local.externally_visible = false;
new_version->local.local = true;
diff --git a/gcc/combine.c b/gcc/combine.c
index 29411149173..ad9aa389871 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -13286,6 +13286,7 @@ distribute_notes (rtx notes, rtx from_insn, rtx i3, rtx i2, rtx elim_i2,
case REG_NORETURN:
case REG_SETJMP:
+ case REG_TM:
/* These notes must remain with the call. It should not be
possible for both I2 and I3 to be a call. */
if (CALL_P (i3))
diff --git a/gcc/common.opt b/gcc/common.opt
index 1871054a21c..4eb5b30b1c4 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1194,6 +1194,10 @@ floop-block
Common Report Var(flag_loop_block) Optimization
Enable Loop Blocking transformation
+fgnu-tm
+Common Report Var(flag_tm)
+Enable support for GNU transactional memory
+
floop-flatten
Common Report Var(flag_loop_flatten) Optimization
Enable Loop Flattening transformation
diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
index 5f64b08505c..5dcb68c2d43 100644
--- a/gcc/config/i386/i386-builtin-types.def
+++ b/gcc/config/i386/i386-builtin-types.def
@@ -114,6 +114,7 @@ DEF_POINTER_TYPE (PINT, INT)
DEF_POINTER_TYPE (PULONGLONG, ULONGLONG)
DEF_POINTER_TYPE (PUNSIGNED, UNSIGNED)
+DEF_POINTER_TYPE (PV2SI, V2SI)
DEF_POINTER_TYPE (PV2DF, V2DF)
DEF_POINTER_TYPE (PV2DI, V2DI)
DEF_POINTER_TYPE (PV2SF, V2SF)
@@ -124,6 +125,7 @@ DEF_POINTER_TYPE (PV8SF, V8SF)
DEF_POINTER_TYPE (PV4SI, V4SI)
DEF_POINTER_TYPE (PV8SI, V8SI)
+DEF_POINTER_TYPE (PCV2SI, V2SI, CONST)
DEF_POINTER_TYPE (PCV2DF, V2DF, CONST)
DEF_POINTER_TYPE (PCV2SF, V2SF, CONST)
DEF_POINTER_TYPE (PCV4DF, V4DF, CONST)
@@ -175,6 +177,7 @@ DEF_FUNCTION_TYPE (V2SF, V2SI)
DEF_FUNCTION_TYPE (V2SI, V2DF)
DEF_FUNCTION_TYPE (V2SI, V2SF)
DEF_FUNCTION_TYPE (V2SI, V2SI)
+DEF_FUNCTION_TYPE (V2SI, PCV2SI)
DEF_FUNCTION_TYPE (V2SI, V4SF)
DEF_FUNCTION_TYPE (V32QI, PCCHAR)
DEF_FUNCTION_TYPE (V4DF, PCDOUBLE)
@@ -188,6 +191,7 @@ DEF_FUNCTION_TYPE (V4SF, PCFLOAT)
DEF_FUNCTION_TYPE (V4SF, V2DF)
DEF_FUNCTION_TYPE (V4SF, V4DF)
DEF_FUNCTION_TYPE (V4SF, V4SF)
+DEF_FUNCTION_TYPE (V4SF, PCV4SF)
DEF_FUNCTION_TYPE (V4SF, V4SI)
DEF_FUNCTION_TYPE (V4SF, V8SF)
DEF_FUNCTION_TYPE (V4SF, V8HI)
@@ -203,6 +207,7 @@ DEF_FUNCTION_TYPE (V8HI, V8HI)
DEF_FUNCTION_TYPE (V8QI, V8QI)
DEF_FUNCTION_TYPE (V8SF, PCFLOAT)
DEF_FUNCTION_TYPE (V8SF, PCV4SF)
+DEF_FUNCTION_TYPE (V8SF, PCV8SF)
DEF_FUNCTION_TYPE (V8SF, V4SF)
DEF_FUNCTION_TYPE (V8SF, V8SF)
DEF_FUNCTION_TYPE (V8SF, V8SI)
@@ -353,9 +358,12 @@ DEF_FUNCTION_TYPE (VOID, PFLOAT, V4SF)
DEF_FUNCTION_TYPE (VOID, PFLOAT, V8SF)
DEF_FUNCTION_TYPE (VOID, PINT, INT)
DEF_FUNCTION_TYPE (VOID, PULONGLONG, ULONGLONG)
+DEF_FUNCTION_TYPE (VOID, PV2SI, V2SI)
DEF_FUNCTION_TYPE (VOID, PV2DI, V2DI)
DEF_FUNCTION_TYPE (VOID, PV2SF, V4SF)
DEF_FUNCTION_TYPE (VOID, PV4DI, V4DI)
+DEF_FUNCTION_TYPE (VOID, PV4SF, V4SF)
+DEF_FUNCTION_TYPE (VOID, PV8SF, V8SF)
DEF_FUNCTION_TYPE (VOID, UNSIGNED, UNSIGNED)
DEF_FUNCTION_TYPE (INT, V16QI, V16QI, INT)
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index bce100a138c..799e12b2b14 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -5028,6 +5028,40 @@ ix86_handle_cconv_attribute (tree *node, tree name,
return NULL_TREE;
}
+/* The transactional memory builtins are implicitly regparm or fastcall
+ depending on the ABI. Override the generic do-nothing attribute that
+ these builtins were declared with, and replace it with one of the two
+ attributes that we expect elsewhere. */
+
+static tree
+ix86_handle_tm_regparm_attribute (tree *node, tree name ATTRIBUTE_UNUSED,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree alt;
+
+ /* In no case do we want to add the placeholder attribute. */
+ *no_add_attrs = true;
+
+ /* The 64-bit ABI is unchanged for transactional memory. */
+ if (TARGET_64BIT)
+ return NULL_TREE;
+
+ /* ??? Is there a better way to validate 32-bit windows? We have
+ cfun->machine->call_abi, but that seems to be set only for 64-bit. */
+ if (CHECK_STACK_LIMIT > 0)
+ alt = tree_cons (get_identifier ("fastcall"), NULL, NULL);
+ else
+ {
+ alt = tree_cons (NULL, build_int_cst (NULL, 2), NULL);
+ alt = tree_cons (get_identifier ("regparm"), alt, NULL);
+ }
+ decl_attributes (node, alt, flags);
+
+ return NULL_TREE;
+}
+
/* This function determines from TYPE the calling-convention. */
unsigned int
@@ -26790,6 +26824,154 @@ static const struct builtin_description bdesc_multi_arg[] =
{ OPTION_MASK_ISA_XOP, CODE_FOR_xop_vpermil2v8sf3, "__builtin_ia32_vpermil2ps256", IX86_BUILTIN_VPERMIL2PS256, UNKNOWN, (int)MULTI_ARG_4_SF2_SI_I1 },
};
+
+/* TM vector builtins. */
+
+/* Reuse the existing x86-specific `struct builtin_description' because
+   we're lazy.  Add casts to make the TM builtin codes fit.  */
+static const struct builtin_description bdesc_tm[] =
+{
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WM64", (enum ix86_builtins) BUILT_IN_TM_STORE_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaRM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_WaWM64", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M64, UNKNOWN, VOID_FTYPE_PV2SI_V2SI },
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaRM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RaWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_RfWM64", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M64, UNKNOWN, V2SI_FTYPE_PCV2SI },
+
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WM128", (enum ix86_builtins) BUILT_IN_TM_STORE_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaRM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_WaWM128", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M128, UNKNOWN, VOID_FTYPE_PV4SF_V4SF },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaRM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RaWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_RfWM128", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M128, UNKNOWN, V4SF_FTYPE_PCV4SF },
+
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WM256", (enum ix86_builtins) BUILT_IN_TM_STORE_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaRM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAR_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_WaWM256", (enum ix86_builtins) BUILT_IN_TM_STORE_WAW_M256, UNKNOWN, VOID_FTYPE_PV8SF_V8SF },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaRM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAR_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RaWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RAW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_RfWM256", (enum ix86_builtins) BUILT_IN_TM_LOAD_RFW_M256, UNKNOWN, V8SF_FTYPE_PCV8SF },
+
+ { OPTION_MASK_ISA_MMX, CODE_FOR_nothing, "__builtin__ITM_LM64", (enum ix86_builtins) BUILT_IN_TM_LOG_M64, UNKNOWN, VOID_FTYPE_PCVOID },
+ { OPTION_MASK_ISA_SSE, CODE_FOR_nothing, "__builtin__ITM_LM128", (enum ix86_builtins) BUILT_IN_TM_LOG_M128, UNKNOWN, VOID_FTYPE_PCVOID },
+ { OPTION_MASK_ISA_AVX, CODE_FOR_nothing, "__builtin__ITM_LM256", (enum ix86_builtins) BUILT_IN_TM_LOG_M256, UNKNOWN, VOID_FTYPE_PCVOID },
+};
+
+/* TM callbacks. */
+
+/* Return the builtin decl needed to load a vector of TYPE. */
+
+static tree
+ix86_builtin_tm_load (tree type)
+{
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ switch (tree_low_cst (TYPE_SIZE (type), 1))
+ {
+ case 64:
+ return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64);
+ case 128:
+ return builtin_decl_explicit (BUILT_IN_TM_LOAD_M128);
+ case 256:
+ return builtin_decl_explicit (BUILT_IN_TM_LOAD_M256);
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Return the builtin decl needed to store a vector of TYPE. */
+
+static tree
+ix86_builtin_tm_store (tree type)
+{
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ switch (tree_low_cst (TYPE_SIZE (type), 1))
+ {
+ case 64:
+ return builtin_decl_explicit (BUILT_IN_TM_STORE_M64);
+ case 128:
+ return builtin_decl_explicit (BUILT_IN_TM_STORE_M128);
+ case 256:
+ return builtin_decl_explicit (BUILT_IN_TM_STORE_M256);
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Initialize the transactional memory vector load/store builtins. */
+
+static void
+ix86_init_tm_builtins (void)
+{
+ enum ix86_builtin_func_type ftype;
+ const struct builtin_description *d;
+ size_t i;
+ tree decl;
+ tree attrs_load, attrs_type_load, attrs_store, attrs_type_store;
+ tree attrs_log, attrs_type_log;
+
+ if (!flag_tm)
+ return;
+
+ /* Use whatever attributes a normal TM load has. */
+ decl = builtin_decl_explicit (BUILT_IN_TM_LOAD_1);
+ attrs_load = DECL_ATTRIBUTES (decl);
+ attrs_type_load = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+ /* Use whatever attributes a normal TM store has. */
+ decl = builtin_decl_explicit (BUILT_IN_TM_STORE_1);
+ attrs_store = DECL_ATTRIBUTES (decl);
+ attrs_type_store = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+ /* Use whatever attributes a normal TM log has. */
+ decl = builtin_decl_explicit (BUILT_IN_TM_LOG);
+ attrs_log = DECL_ATTRIBUTES (decl);
+ attrs_type_log = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+
+ for (i = 0, d = bdesc_tm;
+ i < ARRAY_SIZE (bdesc_tm);
+ i++, d++)
+ {
+ if ((d->mask & ix86_isa_flags) != 0
+ || (lang_hooks.builtin_function
+ == lang_hooks.builtin_function_ext_scope))
+ {
+ tree type, attrs, attrs_type;
+ enum built_in_function code = (enum built_in_function) d->code;
+
+ ftype = (enum ix86_builtin_func_type) d->flag;
+ type = ix86_get_builtin_func_type (ftype);
+
+ if (BUILTIN_TM_LOAD_P (code))
+ {
+ attrs = attrs_load;
+ attrs_type = attrs_type_load;
+ }
+ else if (BUILTIN_TM_STORE_P (code))
+ {
+ attrs = attrs_store;
+ attrs_type = attrs_type_store;
+ }
+ else
+ {
+ attrs = attrs_log;
+ attrs_type = attrs_type_log;
+ }
+ decl = add_builtin_function (d->name, type, code, BUILT_IN_NORMAL,
+				   /* The name without the __builtin_
+				      prefix, for calling it directly.  */
+ d->name + strlen ("__builtin_"),
+ attrs);
+ /* add_builtin_function() will set the DECL_ATTRIBUTES, now
+ set the TYPE_ATTRIBUTES. */
+ decl_attributes (&TREE_TYPE (decl), attrs_type, ATTR_FLAG_BUILT_IN);
+
+ set_builtin_decl (code, decl, false);
+ }
+ }
+}
/* Set up all the MMX/SSE builtins, even builtins for instructions that are not
in the current target ISA to allow the user to compile particular modules
@@ -27163,6 +27345,7 @@ ix86_init_builtins (void)
TREE_READONLY (t) = 1;
ix86_builtins[(int) IX86_BUILTIN_COPYSIGNQ] = t;
+ ix86_init_tm_builtins ();
ix86_init_mmx_sse_builtins ();
if (TARGET_LP64)
@@ -29921,7 +30104,6 @@ avx_vperm2f128_parallel (rtx par, enum machine_mode mode)
return mask + 1;
}
-
/* Store OPERAND to the memory after reload is completed. This means
that we can't easily use assign_stack_local. */
rtx
@@ -34784,6 +34966,11 @@ static const struct attribute_spec ix86_attribute_table[] =
for FP arguments. */
{ "sseregparm", 0, 0, false, true, true, ix86_handle_cconv_attribute,
true },
+ /* The transactional memory builtins are implicitly regparm or fastcall
+ depending on the ABI. Override the generic do-nothing attribute that
+ these builtins were declared with. */
+ { "*tm regparm", 0, 0, false, true, true, ix86_handle_tm_regparm_attribute,
+ true },
/* force_align_arg_pointer says this function realigns the stack at entry. */
{ (const char *)&ix86_force_align_arg_pointer_string, 0, 0,
false, true, true, ix86_handle_cconv_attribute, false },
@@ -37954,6 +38141,12 @@ ix86_autovectorize_vector_sizes (void)
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
ix86_builtin_vectorized_function
+#undef TARGET_VECTORIZE_BUILTIN_TM_LOAD
+#define TARGET_VECTORIZE_BUILTIN_TM_LOAD ix86_builtin_tm_load
+
+#undef TARGET_VECTORIZE_BUILTIN_TM_STORE
+#define TARGET_VECTORIZE_BUILTIN_TM_STORE ix86_builtin_tm_store
+
#undef TARGET_VECTORIZE_BUILTIN_GATHER
#define TARGET_VECTORIZE_BUILTIN_GATHER ix86_vectorize_builtin_gather
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index bcdecdf4cc3..15b44b5cfb8 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,40 @@
+2011-11-07 Richard Henderson <rth@redhat.com>
+ Aldy Hernandez <aldyh@redhat.com>
+ Torvald Riegel <triegel@redhat.com>
+
+ Merged from transactional-memory.
+
+ * call.c (build_new_function_call): Call tm_malloc_replacement.
+ * class.c (check_bases): Compute transaction attributes for the
+ class based on its base classes.
+ (look_for_tm_attr_overrides, set_one_vmethod_tm_attributes,
+ set_method_tm_attributes): New.
+ (finish_struct_1): Call set_method_tm_attributes.
+ * cp-tree.h (begin_transaction_stmt, finish_transaction_stmt,
+ build_transaction_expr): Declare.
+ (TRANSACTION_EXPR_IS_STMT): New.
+ * decl.c (push_cp_library_fn): Set attribute to transaction_safe.
+ * except.c (do_get_exception_ptr): Apply transaction_pure.
+ (do_begin_catch): Mark _ITM_cxa_begin_catch transaction_pure and
+ record as transactional-memory wrapper.
+ (do_end_catch): Similarly for _ITM_cxa_end_catch.
+ (do_allocate_exception): Similarly for _ITM_cxa_allocate_exception.
+ (build_throw): Similarly for _ITM_cxa_throw. Make __cxa_rethrow pure.
+ * parser.h (struct cp_parser): Add in_transaction flag.
+ * parser.c (enum non_integral_constant): Add NIC_TRANSACTION.
+ (cp_parser_non_integral_constant_expression): Handle NIC_TRANSACTION.
+ (enum required_token): Add transaction tokens.
+ (cp_parser_transaction, cp_parser_transaction_expression,
+ cp_parser_function_transaction, cp_parser_transaction_cancel,
+ cp_parser_txn_attribute_opt): New.
+ (cp_parser_unary_expression): Handle RID_TRANSACTION*.
+ (cp_parser_statement, cp_parser_function_definition_after_declarator,
+ cp_parser_token_starts_function_definition_p): Same.
+ (cp_parser_required_error): Handle RT_TRANSACTION*.
+ * pt.c (tsubst_expr): Handle TRANSACTION_EXPR.
+ * semantics.c (begin_transaction_stmt, finish_transaction_stmt,
+ build_transaction_expr): New.
+
2011-11-08 Dodji Seketeli <dodji@redhat.com>
Fix context handling of alias-declaration
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 2bf22f96ed8..578905e41e6 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -3826,6 +3826,9 @@ build_new_function_call (tree fn, VEC(tree,gc) **args, bool koenig_p,
return error_mark_node;
}
+ if (flag_tm)
+ tm_malloc_replacement (fn);
+
/* If this function was found without using argument dependent
lookup, then we want to ignore any undeclared friend
functions. */
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 1775868aa8a..be632be64c8 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -1227,13 +1227,12 @@ check_bases (tree t,
int* no_const_asn_ref_p)
{
int i;
- int seen_non_virtual_nearly_empty_base_p;
+ bool seen_non_virtual_nearly_empty_base_p = 0;
+ int seen_tm_mask = 0;
tree base_binfo;
tree binfo;
tree field = NULL_TREE;
- seen_non_virtual_nearly_empty_base_p = 0;
-
if (!CLASSTYPE_NON_STD_LAYOUT (t))
for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
if (TREE_CODE (field) == FIELD_DECL)
@@ -1338,6 +1337,23 @@ check_bases (tree t,
break;
}
}
+
+ /* Don't bother collecting tm attributes if transactional memory
+ support is not enabled. */
+ if (flag_tm)
+ {
+ tree tm_attr = find_tm_attribute (TYPE_ATTRIBUTES (basetype));
+ if (tm_attr)
+ seen_tm_mask |= tm_attr_to_mask (tm_attr);
+ }
+ }
+
+ /* If one of the base classes had TM attributes, and the current class
+ doesn't define its own, then the current class inherits one. */
+ if (seen_tm_mask && !find_tm_attribute (TYPE_ATTRIBUTES (t)))
+ {
+ tree tm_attr = tm_mask_to_attr (seen_tm_mask & -seen_tm_mask);
+ TYPE_ATTRIBUTES (t) = tree_cons (tm_attr, NULL, TYPE_ATTRIBUTES (t));
}
}
@@ -4258,6 +4274,137 @@ clone_constructors_and_destructors (tree t)
clone_function_decl (OVL_CURRENT (fns), /*update_method_vec_p=*/1);
}
+/* Subroutine of set_one_vmethod_tm_attributes. Search base classes
+ of TYPE for virtual functions which FNDECL overrides. Return a
+ mask of the tm attributes found therein. */
+
+static int
+look_for_tm_attr_overrides (tree type, tree fndecl)
+{
+ tree binfo = TYPE_BINFO (type);
+ tree base_binfo;
+ int ix, found = 0;
+
+ for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ++ix)
+ {
+ tree o, basetype = BINFO_TYPE (base_binfo);
+
+ if (!TYPE_POLYMORPHIC_P (basetype))
+ continue;
+
+ o = look_for_overrides_here (basetype, fndecl);
+ if (o)
+ found |= tm_attr_to_mask (find_tm_attribute
+ (TYPE_ATTRIBUTES (TREE_TYPE (o))));
+ else
+ found |= look_for_tm_attr_overrides (basetype, fndecl);
+ }
+
+ return found;
+}
+
+/* Subroutine of set_method_tm_attributes. Handle the checks and
+ inheritance for one virtual method FNDECL. */
+
+static void
+set_one_vmethod_tm_attributes (tree type, tree fndecl)
+{
+ tree tm_attr;
+ int found, have;
+
+ found = look_for_tm_attr_overrides (type, fndecl);
+
+ /* If FNDECL doesn't actually override anything (i.e. T is the
+ class that first declares FNDECL virtual), then we're done. */
+ if (found == 0)
+ return;
+
+ tm_attr = find_tm_attribute (TYPE_ATTRIBUTES (TREE_TYPE (fndecl)));
+ have = tm_attr_to_mask (tm_attr);
+
+ /* Intel STM Language Extension 3.0, Section 4.2 table 4:
+ tm_pure must match exactly, otherwise no weakening of
+ tm_safe > tm_callable > nothing. */
+ /* ??? The tm_pure attribute didn't make the transition to the
+ multivendor language spec. */
+ if (have == TM_ATTR_PURE)
+ {
+ if (found != TM_ATTR_PURE)
+ {
+ found &= -found;
+ goto err_override;
+ }
+ }
+ /* If the overridden function is tm_pure, then FNDECL must be. */
+ else if (found == TM_ATTR_PURE && tm_attr)
+ goto err_override;
+ /* Look for base class combinations that cannot be satisfied. */
+ else if (found != TM_ATTR_PURE && (found & TM_ATTR_PURE))
+ {
+ found &= ~TM_ATTR_PURE;
+ found &= -found;
+ error_at (DECL_SOURCE_LOCATION (fndecl),
+ "method overrides both %<transaction_pure%> and %qE methods",
+ tm_mask_to_attr (found));
+ }
+ /* If FNDECL did not declare an attribute, then inherit the most
+ restrictive one. */
+ else if (tm_attr == NULL)
+ {
+ apply_tm_attr (fndecl, tm_mask_to_attr (found & -found));
+ }
+ /* Otherwise validate that we're not weaker than a function
+ that is being overridden. */
+ else
+ {
+ found &= -found;
+ if (found <= TM_ATTR_CALLABLE && have > found)
+ goto err_override;
+ }
+ return;
+
+ err_override:
+ error_at (DECL_SOURCE_LOCATION (fndecl),
+ "method declared %qE overriding %qE method",
+ tm_attr, tm_mask_to_attr (found));
+}
+
+/* For each of the methods in T, propagate a class-level tm attribute. */
+
+static void
+set_method_tm_attributes (tree t)
+{
+ tree class_tm_attr, fndecl;
+
+ /* Don't bother collecting tm attributes if transactional memory
+ support is not enabled. */
+ if (!flag_tm)
+ return;
+
+ /* Process virtual methods first, as they inherit directly from the
+ base virtual function and also require validation of new attributes. */
+ if (TYPE_CONTAINS_VPTR_P (t))
+ {
+ tree vchain;
+ for (vchain = BINFO_VIRTUALS (TYPE_BINFO (t)); vchain;
+ vchain = TREE_CHAIN (vchain))
+ set_one_vmethod_tm_attributes (t, BV_FN (vchain));
+ }
+
+ /* If the class doesn't have an attribute, nothing more to do. */
+ class_tm_attr = find_tm_attribute (TYPE_ATTRIBUTES (t));
+ if (class_tm_attr == NULL)
+ return;
+
+ /* Any method that does not yet have a tm attribute inherits
+ the one from the class. */
+ for (fndecl = TYPE_METHODS (t); fndecl; fndecl = TREE_CHAIN (fndecl))
+ {
+ if (!find_tm_attribute (TYPE_ATTRIBUTES (TREE_TYPE (fndecl))))
+ apply_tm_attr (fndecl, class_tm_attr);
+ }
+}
+
/* Returns true iff class T has a user-defined constructor other than
the default constructor. */
@@ -5841,6 +5988,7 @@ finish_struct_1 (tree t)
}
finish_struct_bits (t);
+ set_method_tm_attributes (t);
/* Complete the rtl for any static member objects of the type we're
working on. */
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 177f10084af..32d08caf9cc 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -73,6 +73,7 @@ c-common.h, not after.
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
+ TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
@@ -3890,6 +3891,10 @@ more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_COPYPRIVATE))
+/* Nonzero if this transaction expression's body contains statements. */
+#define TRANSACTION_EXPR_IS_STMT(NODE) \
+ TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
+
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
@@ -5556,6 +5561,9 @@ extern void finish_omp_atomic (enum tree_code, enum tree_code,
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
+extern tree begin_transaction_stmt (location_t, tree *, int);
+extern void finish_transaction_stmt (tree, tree, int);
+extern tree build_transaction_expr (location_t, tree, int);
extern void finish_omp_taskyield (void);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool);
extern tree baselink_for_fns (tree);
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index 1c337763110..1c46adf99a1 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -4001,6 +4001,8 @@ push_cp_library_fn (enum tree_code operator_code, tree type)
operator_code,
type);
pushdecl (fn);
+ if (flag_tm)
+ apply_tm_attr (fn, get_identifier ("transaction_safe"));
return fn;
}
diff --git a/gcc/cp/except.c b/gcc/cp/except.c
index e529685c992..670a66f1611 100644
--- a/gcc/cp/except.c
+++ b/gcc/cp/except.c
@@ -173,6 +173,9 @@ do_get_exception_ptr (void)
{
/* Declare void* __cxa_get_exception_ptr (void *) throw(). */
fn = declare_nothrow_library_fn (fn, ptr_type_node, ptr_type_node);
+
+ if (flag_tm)
+ apply_tm_attr (fn, get_identifier ("transaction_pure"));
}
return cp_build_function_call_nary (fn, tf_warning_or_error,
@@ -192,6 +195,17 @@ do_begin_catch (void)
{
/* Declare void* __cxa_begin_catch (void *) throw(). */
fn = declare_nothrow_library_fn (fn, ptr_type_node, ptr_type_node);
+
+ /* Create its transactional-memory equivalent. */
+ if (flag_tm)
+ {
+ tree fn2 = get_identifier ("_ITM_cxa_begin_catch");
+ if (!get_global_value_if_present (fn2, &fn2))
+ fn2 = declare_nothrow_library_fn (fn2, ptr_type_node,
+ ptr_type_node);
+ apply_tm_attr (fn2, get_identifier ("transaction_pure"));
+ record_tm_replacement (fn, fn2);
+ }
}
return cp_build_function_call_nary (fn, tf_warning_or_error,
@@ -231,6 +245,19 @@ do_end_catch (tree type)
fn = push_void_library_fn (fn, void_list_node);
/* This can throw if the destructor for the exception throws. */
TREE_NOTHROW (fn) = 0;
+
+ /* Create its transactional-memory equivalent. */
+ if (flag_tm)
+ {
+ tree fn2 = get_identifier ("_ITM_cxa_end_catch");
+ if (!get_global_value_if_present (fn2, &fn2))
+ {
+ fn2 = push_void_library_fn (fn2, void_list_node);
+ TREE_NOTHROW (fn2) = 0;
+ }
+ apply_tm_attr (fn2, get_identifier ("transaction_pure"));
+ record_tm_replacement (fn, fn2);
+ }
}
cleanup = cp_build_function_call_vec (fn, NULL, tf_warning_or_error);
@@ -581,6 +608,16 @@ do_allocate_exception (tree type)
{
/* Declare void *__cxa_allocate_exception(size_t) throw(). */
fn = declare_nothrow_library_fn (fn, ptr_type_node, size_type_node);
+
+ if (flag_tm)
+ {
+ tree fn2 = get_identifier ("_ITM_cxa_allocate_exception");
+ if (!get_global_value_if_present (fn2, &fn2))
+ fn2 = declare_nothrow_library_fn (fn2, ptr_type_node,
+ size_type_node);
+ apply_tm_attr (fn2, get_identifier ("transaction_pure"));
+ record_tm_replacement (fn, fn2);
+ }
}
return cp_build_function_call_nary (fn, tf_warning_or_error,
@@ -712,6 +749,15 @@ build_throw (tree exp)
ptr_type_node, ptr_type_node,
cleanup_type, NULL_TREE);
fn = push_throw_library_fn (fn, tmp);
+
+ if (flag_tm)
+ {
+ tree fn2 = get_identifier ("_ITM_cxa_throw");
+ if (!get_global_value_if_present (fn2, &fn2))
+ fn2 = push_throw_library_fn (fn2, tmp);
+ apply_tm_attr (fn2, get_identifier ("transaction_pure"));
+ record_tm_replacement (fn, fn2);
+ }
}
/* [except.throw]
@@ -831,6 +877,9 @@ build_throw (tree exp)
(fn, build_function_type_list (void_type_node, NULL_TREE));
}
+ if (flag_tm)
+ apply_tm_attr (fn, get_identifier ("transaction_pure"));
+
/* ??? Indicate that this function call allows exceptions of the type
of the enclosing catch block (if known). */
exp = cp_build_function_call_vec (fn, NULL, tf_warning_or_error);
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 3d3587794f2..12f3c4011fa 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -106,7 +106,9 @@ typedef enum non_integral_constant {
/* a comma operator */
NIC_COMMA,
/* a call to a constructor */
- NIC_CONSTRUCTOR
+ NIC_CONSTRUCTOR,
+ /* a transaction expression */
+ NIC_TRANSACTION
} non_integral_constant;
/* The various kinds of errors about name-lookup failing. */
@@ -171,7 +173,10 @@ typedef enum required_token {
RT_INTERATION, /* iteration-statement */
RT_JUMP, /* jump-statement */
RT_CLASS_KEY, /* class-key */
- RT_CLASS_TYPENAME_TEMPLATE /* class, typename, or template */
+ RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */
+ RT_TRANSACTION_ATOMIC, /* __transaction_atomic */
+ RT_TRANSACTION_RELAXED, /* __transaction_relaxed */
+ RT_TRANSACTION_CANCEL /* __transaction_cancel */
} required_token;
/* Prototypes. */
@@ -2106,6 +2111,17 @@ static bool cp_parser_extension_opt
static void cp_parser_label_declaration
(cp_parser *);
+/* Transactional Memory Extensions */
+
+static tree cp_parser_transaction
+ (cp_parser *, enum rid);
+static tree cp_parser_transaction_expression
+ (cp_parser *, enum rid);
+static bool cp_parser_function_transaction
+ (cp_parser *, enum rid);
+static tree cp_parser_transaction_cancel
+ (cp_parser *);
+
enum pragma_context { pragma_external, pragma_stmt, pragma_compound };
static bool cp_parser_pragma
(cp_parser *, enum pragma_context);
@@ -2671,6 +2687,10 @@ cp_parser_non_integral_constant_expression (cp_parser *parser,
error ("a call to a constructor "
"cannot appear in a constant-expression");
return true;
+ case NIC_TRANSACTION:
+ error ("a transaction expression "
+ "cannot appear in a constant-expression");
+ return true;
case NIC_THIS:
msg = "this";
break;
@@ -6372,6 +6392,10 @@ cp_parser_unary_expression (cp_parser *parser, bool address_p, bool cast_p,
}
break;
+ case RID_TRANSACTION_ATOMIC:
+ case RID_TRANSACTION_RELAXED:
+ return cp_parser_transaction_expression (parser, keyword);
+
case RID_NOEXCEPT:
{
tree expr;
@@ -8506,6 +8530,11 @@ cp_parser_lambda_body (cp_parser* parser, tree lambda_expr)
declaration-statement
try-block
+ TM Extension:
+
+ statement:
+ atomic-statement
+
IN_COMPOUND is true when the statement is nested inside a
cp_parser_compound_statement; this matters for certain pragmas.
@@ -8582,6 +8611,14 @@ cp_parser_statement (cp_parser* parser, tree in_statement_expr,
cp_parser_declaration_statement (parser);
return;
+ case RID_TRANSACTION_ATOMIC:
+ case RID_TRANSACTION_RELAXED:
+ statement = cp_parser_transaction (parser, keyword);
+ break;
+ case RID_TRANSACTION_CANCEL:
+ statement = cp_parser_transaction_cancel (parser);
+ break;
+
default:
/* It might be a keyword like `int' that can start a
declaration-statement. */
@@ -15194,6 +15231,11 @@ cp_parser_asm_definition (cp_parser* parser)
function-definition:
__extension__ function-definition
+ TM Extension:
+
+ function-definition:
+ decl-specifier-seq [opt] declarator function-transaction-block
+
The DECL_SPECIFIERS apply to this declarator. Returns a
representation of the entity declared. If MEMBER_P is TRUE, then
this declarator appears in a class scope. The new DECL created by
@@ -20911,12 +20953,19 @@ cp_parser_function_definition_after_declarator (cp_parser* parser,
start_lambda_scope (current_function_decl);
- /* If the next token is `try', then we are looking at a
- function-try-block. */
- if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
+ /* If the next token is `try', `__transaction_atomic', or
+ `__transaction_relaxed`, then we are looking at either function-try-block
+ or function-transaction-block. Note that all of these include the
+ function-body. */
+ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC))
+ ctor_initializer_p = cp_parser_function_transaction (parser,
+ RID_TRANSACTION_ATOMIC);
+ else if (cp_lexer_next_token_is_keyword (parser->lexer,
+ RID_TRANSACTION_RELAXED))
+ ctor_initializer_p = cp_parser_function_transaction (parser,
+ RID_TRANSACTION_RELAXED);
+ else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
ctor_initializer_p = cp_parser_function_try_block (parser);
- /* A function-try-block includes the function-body, so we only do
- this next part if we're not processing a function-try-block. */
else
ctor_initializer_p
= cp_parser_ctor_initializer_opt_and_function_body (parser);
@@ -22073,6 +22122,12 @@ cp_parser_required_error (cp_parser *parser,
case RT_AT_THROW:
cp_parser_error (parser, "expected %<@throw%>");
return;
+ case RT_TRANSACTION_ATOMIC:
+ cp_parser_error (parser, "expected %<__transaction_atomic%>");
+ return;
+ case RT_TRANSACTION_RELAXED:
+ cp_parser_error (parser, "expected %<__transaction_relaxed%>");
+ return;
default:
break;
}
@@ -22303,6 +22358,10 @@ cp_parser_token_starts_function_definition_p (cp_token* token)
|| token->type == CPP_COLON
/* A function-try-block begins with `try'. */
|| token->keyword == RID_TRY
+ /* A function-transaction-block begins with `__transaction_atomic'
+ or `__transaction_relaxed'. */
+ || token->keyword == RID_TRANSACTION_ATOMIC
+ || token->keyword == RID_TRANSACTION_RELAXED
/* The named return value extension begins with `return'. */
|| token->keyword == RID_RETURN);
}
@@ -26623,6 +26682,272 @@ cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
SET_EXPR_LOCATION (stmt, pragma_tok->location);
}
+/* Transactional Memory parsing routines. */
+
+/* Parse a transaction attribute.
+
+ txn-attribute:
+ attribute
+ [ [ identifier ] ]
+
+ ??? Simplify this when C++0x bracket attributes are
+ implemented properly. */
+
+static tree
+cp_parser_txn_attribute_opt (cp_parser *parser)
+{
+ cp_token *token;
+ tree attr_name, attr = NULL;
+
+ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
+ return cp_parser_attributes_opt (parser);
+
+ if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
+ return NULL_TREE;
+ cp_lexer_consume_token (parser->lexer);
+ if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE))
+ goto error1;
+
+ token = cp_lexer_peek_token (parser->lexer);
+ if (token->type == CPP_NAME || token->type == CPP_KEYWORD)
+ {
+ token = cp_lexer_consume_token (parser->lexer);
+
+ attr_name = (token->type == CPP_KEYWORD
+ /* For keywords, use the canonical spelling,
+ not the parsed identifier. */
+ ? ridpointers[(int) token->keyword]
+ : token->u.value);
+ attr = build_tree_list (attr_name, NULL_TREE);
+ }
+ else
+ cp_parser_error (parser, "expected identifier");
+
+ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
+ error1:
+ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
+ return attr;
+}
+
+/* Parse a __transaction_atomic or __transaction_relaxed statement.
+
+ transaction-statement:
+ __transaction_atomic txn-attribute[opt] txn-exception-spec[opt]
+ compound-statement
+ __transaction_relaxed txn-exception-spec[opt] compound-statement
+
+ ??? The exception specification is not yet implemented.
+*/
+
+static tree
+cp_parser_transaction (cp_parser *parser, enum rid keyword)
+{
+ unsigned char old_in = parser->in_transaction;
+ unsigned char this_in = 1, new_in;
+ cp_token *token;
+ tree stmt, attrs;
+
+ gcc_assert (keyword == RID_TRANSACTION_ATOMIC
+ || keyword == RID_TRANSACTION_RELAXED);
+ token = cp_parser_require_keyword (parser, keyword,
+ (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
+ : RT_TRANSACTION_RELAXED));
+ gcc_assert (token != NULL);
+
+ if (keyword == RID_TRANSACTION_RELAXED)
+ this_in |= TM_STMT_ATTR_RELAXED;
+ else
+ {
+ attrs = cp_parser_txn_attribute_opt (parser);
+ if (attrs)
+ this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
+ }
+
+  /* Keep track of whether we're in the lexical scope of an outer transaction.  */
+ new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);
+
+ stmt = begin_transaction_stmt (token->location, NULL, this_in);
+
+ parser->in_transaction = new_in;
+ cp_parser_compound_statement (parser, NULL, false, false);
+ parser->in_transaction = old_in;
+
+ finish_transaction_stmt (stmt, NULL, this_in);
+
+ return stmt;
+}
+
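For reference, a small sketch of the statement forms this routine parses (the function and variable names are hypothetical, not taken from the patch); the optional [[outer]] attribute would be consumed by cp_parser_txn_attribute_opt above:

  void
  f (int *p)
  {
    __transaction_atomic { *p += 1; }
    __transaction_relaxed { *p += 2; }
  }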
+/* Parse a __transaction_atomic or __transaction_relaxed expression.
+
+ transaction-expression:
+ __transaction_atomic txn-exception-spec[opt] ( expression )
+ __transaction_relaxed txn-exception-spec[opt] ( expression )
+
+ ??? The exception specification is not yet implemented.
+*/
+
+static tree
+cp_parser_transaction_expression (cp_parser *parser, enum rid keyword)
+{
+ unsigned char old_in = parser->in_transaction;
+ unsigned char this_in = 1;
+ cp_token *token;
+ tree ret;
+
+ gcc_assert (keyword == RID_TRANSACTION_ATOMIC
+ || keyword == RID_TRANSACTION_RELAXED);
+
+ if (!flag_tm)
+ error (keyword == RID_TRANSACTION_RELAXED
+ ? G_("%<__transaction_relaxed%> without transactional memory "
+ "support enabled")
+ : G_("%<__transaction_atomic%> without transactional memory "
+ "support enabled"));
+
+ token = cp_parser_require_keyword (parser, keyword,
+ (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
+ : RT_TRANSACTION_RELAXED));
+ gcc_assert (token != NULL);
+
+ if (keyword == RID_TRANSACTION_RELAXED)
+ this_in |= TM_STMT_ATTR_RELAXED;
+
+ parser->in_transaction = this_in;
+ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
+ {
+ tree expr = cp_parser_expression (parser, /*cast_p=*/false, NULL);
+ ret = build_transaction_expr (token->location, expr, this_in);
+ }
+ else
+ {
+ cp_parser_error (parser, "expected %<(%>");
+ ret = error_mark_node;
+ }
+ parser->in_transaction = old_in;
+
+ if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION))
+ return error_mark_node;
+
+ return (flag_tm ? ret : error_mark_node);
+}
+
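A sketch of the expression form accepted here (hypothetical function names); the parenthesized expression is evaluated transactionally and its value becomes the value of the transaction-expression:

  int
  g (int a, int b)
  {
    return __transaction_atomic (a + b);
  }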
+/* Parse a function-transaction-block.
+
+ function-transaction-block:
+ __transaction_atomic txn-attribute[opt] ctor-initializer[opt]
+ function-body
+ __transaction_atomic txn-attribute[opt] function-try-block
+ __transaction_relaxed ctor-initializer[opt] function-body
+ __transaction_relaxed function-try-block
+*/
+
+static bool
+cp_parser_function_transaction (cp_parser *parser, enum rid keyword)
+{
+ unsigned char old_in = parser->in_transaction;
+ unsigned char new_in = 1;
+ tree compound_stmt, stmt, attrs;
+ bool ctor_initializer_p;
+ cp_token *token;
+
+ gcc_assert (keyword == RID_TRANSACTION_ATOMIC
+ || keyword == RID_TRANSACTION_RELAXED);
+ token = cp_parser_require_keyword (parser, keyword,
+ (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
+ : RT_TRANSACTION_RELAXED));
+ gcc_assert (token != NULL);
+
+ if (keyword == RID_TRANSACTION_RELAXED)
+ new_in |= TM_STMT_ATTR_RELAXED;
+ else
+ {
+ attrs = cp_parser_txn_attribute_opt (parser);
+ if (attrs)
+ new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
+ }
+
+ stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in);
+
+ parser->in_transaction = new_in;
+
+ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
+ ctor_initializer_p = cp_parser_function_try_block (parser);
+ else
+ ctor_initializer_p
+ = cp_parser_ctor_initializer_opt_and_function_body (parser);
+
+ parser->in_transaction = old_in;
+
+ finish_transaction_stmt (stmt, compound_stmt, new_in);
+
+ return ctor_initializer_p;
+}
+
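A sketch of the surface syntax this routine handles, as described by the grammar comment above (class and member names are hypothetical); the transaction keyword follows the declarator and precedes the ctor-initializer or function-body:

  struct counter
  {
    int value;
    counter () __transaction_atomic : value (0) { }
    void bump () __transaction_relaxed { ++value; }
  };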
+/* Parse a __transaction_cancel statement.
+
+ cancel-statement:
+ __transaction_cancel txn-attribute[opt] ;
+ __transaction_cancel txn-attribute[opt] throw-expression ;
+
+ ??? Cancel and throw is not yet implemented. */
+
+static tree
+cp_parser_transaction_cancel (cp_parser *parser)
+{
+ cp_token *token;
+ bool is_outer = false;
+ tree stmt, attrs;
+
+ token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL,
+ RT_TRANSACTION_CANCEL);
+ gcc_assert (token != NULL);
+
+ attrs = cp_parser_txn_attribute_opt (parser);
+ if (attrs)
+ is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);
+
+ /* ??? Parse cancel-and-throw here. */
+
+ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
+
+ if (!flag_tm)
+ {
+ error_at (token->location, "%<__transaction_cancel%> without "
+ "transactional memory support enabled");
+ return error_mark_node;
+ }
+ else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
+ {
+ error_at (token->location, "%<__transaction_cancel%> within a "
+ "%<__transaction_relaxed%>");
+ return error_mark_node;
+ }
+ else if (is_outer)
+ {
+ if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
+ && !is_tm_may_cancel_outer (current_function_decl))
+ {
+ error_at (token->location, "outer %<__transaction_cancel%> not "
+ "within outer %<__transaction_atomic%>");
+ error_at (token->location,
+ " or a %<transaction_may_cancel_outer%> function");
+ return error_mark_node;
+ }
+ }
+ else if (parser->in_transaction == 0)
+ {
+ error_at (token->location, "%<__transaction_cancel%> not within "
+ "%<__transaction_atomic%>");
+ return error_mark_node;
+ }
+
+ stmt = build_tm_abort_call (token->location, is_outer);
+ add_stmt (stmt);
+ finish_stmt ();
+
+ return stmt;
+}
+
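A sketch of the cancel statement in context (names hypothetical): the cancel must appear lexically inside a __transaction_atomic, which is what the checks against parser->in_transaction above enforce:

  static int done;

  void
  h (bool fail)
  {
    __transaction_atomic
      {
        done = 1;
        if (fail)
          __transaction_cancel;
      }
  }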
/* The parser. */
static GTY (()) cp_parser *the_parser;
diff --git a/gcc/cp/parser.h b/gcc/cp/parser.h
index b44d23cd8cd..5b95f0892de 100644
--- a/gcc/cp/parser.h
+++ b/gcc/cp/parser.h
@@ -329,6 +329,10 @@ typedef struct GTY(()) cp_parser {
a local class. */
bool in_function_body;
+ /* Nonzero if we're processing a __transaction_atomic or
+ __transaction_relaxed statement. */
+ unsigned char in_transaction;
+
/* TRUE if we can auto-correct a colon to a scope operator. */
bool colon_corrects_to_scope_p;
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index bf2a2c63735..8c91a9ed021 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -13108,6 +13108,28 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
}
break;
+ case TRANSACTION_EXPR:
+ {
+ int flags = 0;
+ flags |= (TRANSACTION_EXPR_OUTER (t) ? TM_STMT_ATTR_OUTER : 0);
+ flags |= (TRANSACTION_EXPR_RELAXED (t) ? TM_STMT_ATTR_RELAXED : 0);
+
+ if (TRANSACTION_EXPR_IS_STMT (t))
+ {
+ stmt = begin_transaction_stmt (input_location, NULL, flags);
+ RECUR (TRANSACTION_EXPR_BODY (t));
+ finish_transaction_stmt (stmt, NULL, flags);
+ }
+ else
+ {
+ stmt = build_transaction_expr (EXPR_LOCATION (t),
+ RECUR (TRANSACTION_EXPR_BODY (t)),
+ flags);
+ return stmt;
+ }
+ }
+ break;
+
case EXPR_PACK_EXPANSION:
error ("invalid use of pack expansion expression");
return error_mark_node;
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 9d6bd316aa1..508e2529fbe 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -4968,6 +4968,64 @@ finish_omp_taskyield (void)
finish_expr_stmt (stmt);
}
+/* Begin a __transaction_atomic or __transaction_relaxed statement.
+ If PCOMPOUND is non-null, this is for a function-transaction-block, and we
+ should create an extra compound stmt. */
+
+tree
+begin_transaction_stmt (location_t loc, tree *pcompound, int flags)
+{
+ tree r;
+
+ if (pcompound)
+ *pcompound = begin_compound_stmt (0);
+
+ r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE);
+
+  /* Only add the statement to the function if support is enabled.  */
+ if (flag_tm)
+ add_stmt (r);
+ else
+ error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0
+ ? G_("%<__transaction_relaxed%> without "
+ "transactional memory support enabled")
+ : G_("%<__transaction_atomic%> without "
+ "transactional memory support enabled")));
+
+ TRANSACTION_EXPR_BODY (r) = push_stmt_list ();
+ return r;
+}
+
+/* End a __transaction_atomic or __transaction_relaxed statement.
+ If COMPOUND_STMT is non-null, this is for a function-transaction-block,
+ and we should end the compound. */
+
+void
+finish_transaction_stmt (tree stmt, tree compound_stmt, int flags)
+{
+ TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt));
+ TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0;
+ TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0;
+ TRANSACTION_EXPR_IS_STMT (stmt) = 1;
+
+ if (compound_stmt)
+ finish_compound_stmt (compound_stmt);
+ finish_stmt ();
+}
+
+/* Build a __transaction_atomic or __transaction_relaxed expression. */
+
+tree
+build_transaction_expr (location_t loc, tree expr, int flags)
+{
+ tree ret;
+ ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr);
+ if (flags & TM_STMT_ATTR_RELAXED)
+ TRANSACTION_EXPR_RELAXED (ret) = 1;
+ SET_EXPR_LOCATION (ret, loc);
+ return ret;
+}
+
void
init_cp_semantics (void)
{
@@ -8099,6 +8157,7 @@ potential_constant_expression_1 (tree t, bool want_rval, tsubst_flags_t flags)
case STMT_EXPR:
case EXPR_STMT:
case BIND_EXPR:
+ case TRANSACTION_EXPR:
if (flags & tf_error)
error ("expression %qE is not a constant-expression", t);
return false;
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index e3fb21b84d7..3a5a3414cd9 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -1723,6 +1723,19 @@ Program Interface v3.0 @w{@uref{http://www.openmp.org/}}. This option
implies @option{-pthread}, and thus is only supported on targets that
have support for @option{-pthread}.
+@item -fgnu-tm
+@opindex fgnu-tm
+When the option @option{-fgnu-tm} is specified, the compiler
+generates code conforming to the Linux variant of Intel's current
+Transactional Memory ABI specification document (Revision 1.1, May 6
+2009). This is an experimental feature whose interface may change in
+future versions of GCC, as the official specification changes. Please
+note that not all architectures support this feature.
+
+For more information on GCC's support for transactional memory, see
+@ref{Enabling libitm,,The GNU Transactional Memory Library,libitm,GNU
+Transactional Memory Library}.
+
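A minimal usage sketch of the new option (the file name and compile command are illustrative, not part of this patch); the __transaction_atomic statement it uses is the syntax added by the parser changes in this merge:

  /* tm-counter.cc -- hypothetical example; compile with: g++ -fgnu-tm -c tm-counter.cc  */
  static int counter;

  void
  increment ()
  {
    __transaction_atomic { counter++; }
  }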
@item -fms-extensions
@opindex fms-extensions
Accept some non-standard constructs used in Microsoft header files.
@@ -9113,6 +9126,13 @@ parameters only when their cumulative size is less or equal to
@option{ipa-sra-ptr-growth-factor} times the size of the original
pointer parameter.
+@item tm-max-aggregate-size
+When making copies of thread-local variables in a transaction, this
+parameter specifies the size in bytes above which variables are
+saved with the logging functions rather than with save/restore code
+sequence pairs. This option only applies when using
+@option{-fgnu-tm}.
+
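A rough sketch of what the parameter governs, under the assumption spelled out above (aggregate size selects between logging calls and save/restore pairs); the type names, sizes, and threshold below are arbitrary and only illustrative:

  /* Hypothetical; compile with: g++ -fgnu-tm --param tm-max-aggregate-size=9 -c tm-param.cc  */
  struct small { char c[8]; };   /* below the threshold: save/restore code sequence  */
  struct big   { char c[64]; };  /* above the threshold: saved via the logging functions  */

  static __thread struct small s;
  static __thread struct big b;

  void
  touch ()
  {
    __transaction_atomic { s.c[0] = 1; b.c[0] = 1; }
  }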
@item graphite-max-nb-scop-params
To avoid exponential effects in the Graphite loop transforms, the
number of parameters in a Static Control Part (SCoP) is bounded. The
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index fed770265b9..d96932b4e48 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -5758,6 +5758,14 @@ mode returned by @code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE}.
The default is zero which means to not iterate over other vector sizes.
@end deftypefn
+@deftypefn {Target Hook} tree TARGET_VECTORIZE_BUILTIN_TM_LOAD (tree)
+This hook should return the built-in decl needed to load a vector of the given type within a transaction.
+@end deftypefn
+
+@deftypefn {Target Hook} tree TARGET_VECTORIZE_BUILTIN_TM_STORE (tree)
+This hook should return the built-in decl needed to store a vector of the given type within a transaction.
+@end deftypefn
+
@deftypefn {Target Hook} tree TARGET_VECTORIZE_BUILTIN_GATHER (const_tree @var{mem_vectype}, const_tree @var{index_type}, int @var{scale})
Target builtin that implements vector gather operation. @var{mem_vectype}
is the vector type of the load and @var{index_type} is scalar type of
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index f0c6ce0a771..146e38a35e1 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -5696,6 +5696,10 @@ mode returned by @code{TARGET_VECTORIZE_PREFERRED_SIMD_MODE}.
The default is zero which means to not iterate over other vector sizes.
@end deftypefn
+@hook TARGET_VECTORIZE_BUILTIN_TM_LOAD
+
+@hook TARGET_VECTORIZE_BUILTIN_TM_STORE
+
@hook TARGET_VECTORIZE_BUILTIN_GATHER
Target builtin that implements vector gather operation. @var{mem_vectype}
is the vector type of the load and @var{index_type} is scalar type of
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index c2bc56b9758..4a27a058b83 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -3595,6 +3595,7 @@ try_split (rtx pat, rtx trial, int last)
case REG_NORETURN:
case REG_SETJMP:
+ case REG_TM:
for (insn = insn_last; insn != NULL_RTX; insn = PREV_INSN (insn))
{
if (CALL_P (insn))
diff --git a/gcc/gimple-low.c b/gcc/gimple-low.c
index fd03ba4702b..f6deba17938 100644
--- a/gcc/gimple-low.c
+++ b/gcc/gimple-low.c
@@ -396,6 +396,11 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
lower_sequence (gimple_eh_filter_failure (stmt), data);
break;
+ case GIMPLE_EH_ELSE:
+ lower_sequence (gimple_eh_else_n_body (stmt), data);
+ lower_sequence (gimple_eh_else_e_body (stmt), data);
+ break;
+
case GIMPLE_NOP:
case GIMPLE_ASM:
case GIMPLE_ASSIGN:
@@ -446,6 +451,10 @@ lower_stmt (gimple_stmt_iterator *gsi, struct lower_data *data)
data->cannot_fallthru = false;
return;
+ case GIMPLE_TRANSACTION:
+ lower_sequence (gimple_transaction_body (stmt), data);
+ break;
+
default:
gcc_unreachable ();
}
@@ -727,6 +736,10 @@ gimple_stmt_may_fallthru (gimple stmt)
return (gimple_seq_may_fallthru (gimple_try_eval (stmt))
&& gimple_seq_may_fallthru (gimple_try_cleanup (stmt)));
+ case GIMPLE_EH_ELSE:
+ return (gimple_seq_may_fallthru (gimple_eh_else_n_body (stmt))
+ || gimple_seq_may_fallthru (gimple_eh_else_e_body (stmt)));
+
case GIMPLE_CALL:
/* Functions that do not return do not fall through. */
return (gimple_call_flags (stmt) & ECF_NORETURN) == 0;
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 981d5b094d9..df703b4ce75 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "gimple.h"
#include "value-prof.h"
+#include "trans-mem.h"
#define INDENT(SPACE) \
do { int i; for (i = 0; i < SPACE; i++) pp_space (buffer); } while (0)
@@ -162,6 +163,7 @@ debug_gimple_seq (gimple_seq seq)
'd' - outputs an int as a decimal,
's' - outputs a string,
'n' - outputs a newline,
+ 'x' - outputs an int as hexadecimal,
'+' - increases indent by 2 then outputs a newline,
'-' - decreases indent by 2 then outputs a newline. */
@@ -216,6 +218,10 @@ dump_gimple_fmt (pretty_printer *buffer, int spc, int flags,
newline_and_indent (buffer, spc);
break;
+ case 'x':
+ pp_scalar (buffer, "%x", va_arg (args, int));
+ break;
+
case '+':
spc += 2;
newline_and_indent (buffer, spc);
@@ -622,6 +628,7 @@ static void
dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags)
{
tree lhs = gimple_call_lhs (gs);
+ tree fn = gimple_call_fn (gs);
if (flags & TDF_ALIAS)
{
@@ -648,8 +655,7 @@ dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags)
dump_gimple_fmt (buffer, spc, flags, "%G <%s, %T", gs,
internal_fn_name (gimple_call_internal_fn (gs)), lhs);
else
- dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T",
- gs, gimple_call_fn (gs), lhs);
+ dump_gimple_fmt (buffer, spc, flags, "%G <%T, %T", gs, fn, lhs);
if (gimple_call_num_args (gs) > 0)
{
pp_string (buffer, ", ");
@@ -672,7 +678,7 @@ dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags)
if (gimple_call_internal_p (gs))
pp_string (buffer, internal_fn_name (gimple_call_internal_fn (gs)));
else
- print_call_name (buffer, gimple_call_fn (gs), flags);
+ print_call_name (buffer, fn, flags);
pp_string (buffer, " (");
dump_gimple_call_args (buffer, gs, flags);
pp_character (buffer, ')');
@@ -689,9 +695,59 @@ dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags)
if (gimple_call_return_slot_opt_p (gs))
pp_string (buffer, " [return slot optimization]");
-
if (gimple_call_tail_p (gs))
pp_string (buffer, " [tail call]");
+
+ /* Dump the arguments of _ITM_beginTransaction sanely. */
+ if (TREE_CODE (fn) == ADDR_EXPR)
+ fn = TREE_OPERAND (fn, 0);
+ if (TREE_CODE (fn) == FUNCTION_DECL && decl_is_tm_clone (fn))
+ pp_string (buffer, " [tm-clone]");
+ if (TREE_CODE (fn) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL
+ && DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_START
+ && gimple_call_num_args (gs) > 0)
+ {
+ tree t = gimple_call_arg (gs, 0);
+ unsigned HOST_WIDE_INT props;
+ gcc_assert (TREE_CODE (t) == INTEGER_CST);
+
+ pp_string (buffer, " [ ");
+
+ /* Get the transaction code properties. */
+ props = TREE_INT_CST_LOW (t);
+
+ if (props & PR_INSTRUMENTEDCODE)
+ pp_string (buffer, "instrumentedCode ");
+ if (props & PR_UNINSTRUMENTEDCODE)
+ pp_string (buffer, "uninstrumentedCode ");
+ if (props & PR_HASNOXMMUPDATE)
+ pp_string (buffer, "hasNoXMMUpdate ");
+ if (props & PR_HASNOABORT)
+ pp_string (buffer, "hasNoAbort ");
+ if (props & PR_HASNOIRREVOCABLE)
+ pp_string (buffer, "hasNoIrrevocable ");
+ if (props & PR_DOESGOIRREVOCABLE)
+ pp_string (buffer, "doesGoIrrevocable ");
+ if (props & PR_HASNOSIMPLEREADS)
+ pp_string (buffer, "hasNoSimpleReads ");
+ if (props & PR_AWBARRIERSOMITTED)
+ pp_string (buffer, "awBarriersOmitted ");
+ if (props & PR_RARBARRIERSOMITTED)
+ pp_string (buffer, "RaRBarriersOmitted ");
+ if (props & PR_UNDOLOGCODE)
+ pp_string (buffer, "undoLogCode ");
+ if (props & PR_PREFERUNINSTRUMENTED)
+ pp_string (buffer, "preferUninstrumented ");
+ if (props & PR_EXCEPTIONBLOCK)
+ pp_string (buffer, "exceptionBlock ");
+ if (props & PR_HASELSE)
+ pp_string (buffer, "hasElse ");
+ if (props & PR_READONLY)
+ pp_string (buffer, "readOnly ");
+
+ pp_string (buffer, "]");
+ }
}
@@ -947,6 +1003,24 @@ dump_gimple_eh_must_not_throw (pretty_printer *buffer, gimple gs,
}
+/* Dump a GIMPLE_EH_ELSE tuple on the pretty_printer BUFFER, SPC spaces of
+ indent. FLAGS specifies details to show in the dump (see TDF_* in
+ tree-pass.h). */
+
+static void
+dump_gimple_eh_else (pretty_printer *buffer, gimple gs, int spc, int flags)
+{
+ if (flags & TDF_RAW)
+ dump_gimple_fmt (buffer, spc, flags,
+ "%G <%+N_BODY <%S>%nE_BODY <%S>%->", gs,
+ gimple_eh_else_n_body (gs), gimple_eh_else_e_body (gs));
+ else
+ dump_gimple_fmt (buffer, spc, flags,
+ "<<<if_normal_exit>>>%+{%S}%-<<<else_eh_exit>>>%+{%S}",
+ gimple_eh_else_n_body (gs), gimple_eh_else_e_body (gs));
+}
+
+
/* Dump a GIMPLE_RESX tuple on the pretty_printer BUFFER, SPC spaces of
indent. FLAGS specifies details to show in the dump (see TDF_* in
tree-pass.h). */
@@ -1269,6 +1343,86 @@ dump_gimple_omp_return (pretty_printer *buffer, gimple gs, int spc, int flags)
}
}
+/* Dump a GIMPLE_TRANSACTION tuple on the pretty_printer BUFFER. */
+
+static void
+dump_gimple_transaction (pretty_printer *buffer, gimple gs, int spc, int flags)
+{
+ unsigned subcode = gimple_transaction_subcode (gs);
+
+ if (flags & TDF_RAW)
+ {
+ dump_gimple_fmt (buffer, spc, flags,
+ "%G [SUBCODE=%x,LABEL=%T] <%+BODY <%S> >",
+ gs, subcode, gimple_transaction_label (gs),
+ gimple_transaction_body (gs));
+ }
+ else
+ {
+ if (subcode & GTMA_IS_OUTER)
+ pp_string (buffer, "__transaction_atomic [[outer]]");
+ else if (subcode & GTMA_IS_RELAXED)
+ pp_string (buffer, "__transaction_relaxed");
+ else
+ pp_string (buffer, "__transaction_atomic");
+ subcode &= ~GTMA_DECLARATION_MASK;
+
+ if (subcode || gimple_transaction_label (gs))
+ {
+ pp_string (buffer, " //");
+ if (gimple_transaction_label (gs))
+ {
+ pp_string (buffer, " LABEL=");
+ dump_generic_node (buffer, gimple_transaction_label (gs),
+ spc, flags, false);
+ }
+ if (subcode)
+ {
+ pp_string (buffer, " SUBCODE=[ ");
+ if (subcode & GTMA_HAVE_ABORT)
+ {
+ pp_string (buffer, "GTMA_HAVE_ABORT ");
+ subcode &= ~GTMA_HAVE_ABORT;
+ }
+ if (subcode & GTMA_HAVE_LOAD)
+ {
+ pp_string (buffer, "GTMA_HAVE_LOAD ");
+ subcode &= ~GTMA_HAVE_LOAD;
+ }
+ if (subcode & GTMA_HAVE_STORE)
+ {
+ pp_string (buffer, "GTMA_HAVE_STORE ");
+ subcode &= ~GTMA_HAVE_STORE;
+ }
+ if (subcode & GTMA_MAY_ENTER_IRREVOCABLE)
+ {
+ pp_string (buffer, "GTMA_MAY_ENTER_IRREVOCABLE ");
+ subcode &= ~GTMA_MAY_ENTER_IRREVOCABLE;
+ }
+ if (subcode & GTMA_DOES_GO_IRREVOCABLE)
+ {
+ pp_string (buffer, "GTMA_DOES_GO_IRREVOCABLE ");
+ subcode &= ~GTMA_DOES_GO_IRREVOCABLE;
+ }
+ if (subcode)
+ pp_printf (buffer, "0x%x ", subcode);
+ pp_string (buffer, "]");
+ }
+ }
+
+ if (!gimple_seq_empty_p (gimple_transaction_body (gs)))
+ {
+ newline_and_indent (buffer, spc + 2);
+ pp_character (buffer, '{');
+ pp_newline (buffer);
+ dump_gimple_seq (buffer, gimple_transaction_body (gs),
+ spc + 4, flags);
+ newline_and_indent (buffer, spc + 2);
+ pp_character (buffer, '}');
+ }
+ }
+}
+
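Likewise, a non-raw dump of a GIMPLE_TRANSACTION produced by the function above has roughly this shape (label and flag set are illustrative only):

  __transaction_atomic  // LABEL=<L0> SUBCODE=[ GTMA_HAVE_LOAD GTMA_HAVE_STORE ]
    {
      ...
    }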
/* Dump a GIMPLE_ASM tuple on the pretty_printer BUFFER, SPC spaces of
indent. FLAGS specifies details to show in the dump (see TDF_* in
tree-pass.h). */
@@ -1855,6 +2009,10 @@ dump_gimple_stmt (pretty_printer *buffer, gimple gs, int spc, int flags)
dump_gimple_eh_must_not_throw (buffer, gs, spc, flags);
break;
+ case GIMPLE_EH_ELSE:
+ dump_gimple_eh_else (buffer, gs, spc, flags);
+ break;
+
case GIMPLE_RESX:
dump_gimple_resx (buffer, gs, spc, flags);
break;
@@ -1877,6 +2035,10 @@ dump_gimple_stmt (pretty_printer *buffer, gimple gs, int spc, int flags)
pp_string (buffer, " predictor.");
break;
+ case GIMPLE_TRANSACTION:
+ dump_gimple_transaction (buffer, gs, spc, flags);
+ break;
+
default:
GIMPLE_NIY;
}
diff --git a/gcc/gimple.c b/gcc/gimple.c
index b2874bb071b..e803f56d0a3 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -743,6 +743,17 @@ gimple_build_eh_must_not_throw (tree decl)
return p;
}
+/* Build a GIMPLE_EH_ELSE statement. */
+
+gimple
+gimple_build_eh_else (gimple_seq n_body, gimple_seq e_body)
+{
+ gimple p = gimple_alloc (GIMPLE_EH_ELSE, 0);
+ gimple_eh_else_set_n_body (p, n_body);
+ gimple_eh_else_set_e_body (p, e_body);
+ return p;
+}
+
/* Build a GIMPLE_TRY statement.
EVAL is the expression to evaluate.
@@ -1146,6 +1157,17 @@ gimple_build_omp_atomic_store (tree val)
return p;
}
+/* Build a GIMPLE_TRANSACTION statement. */
+
+gimple
+gimple_build_transaction (gimple_seq body, tree label)
+{
+ gimple p = gimple_alloc (GIMPLE_TRANSACTION, 0);
+ gimple_transaction_set_body (p, body);
+ gimple_transaction_set_label (p, label);
+ return p;
+}
+
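As a rough sketch (not part of this patch), the two new constructors compose as follows when building a lowered transaction region: the body goes into a GIMPLE_TRY_FINALLY whose cleanup is a single GIMPLE_EH_ELSE selecting between the normal-exit and exception-exit commit calls.  How the commit call statements themselves are obtained is left out here.

  /* Hypothetical helper, for illustration only.  */
  static gimple
  sketch_build_tm_region (gimple_seq body, gimple commit_call,
                          gimple commit_eh_call, tree over_label)
  {
    gimple_seq n_seq = NULL, e_seq = NULL, cleanup = NULL, trans_body = NULL;
    gimple eh_else_stmt, try_stmt;

    /* Normal exits commit; exception exits use the EH commit variant.  */
    gimple_seq_add_stmt (&n_seq, commit_call);
    gimple_seq_add_stmt (&e_seq, commit_eh_call);
    eh_else_stmt = gimple_build_eh_else (n_seq, e_seq);

    /* The GIMPLE_EH_ELSE must be the sole contents of the cleanup.  */
    gimple_seq_add_stmt (&cleanup, eh_else_stmt);
    try_stmt = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);

    gimple_seq_add_stmt (&trans_body, try_stmt);
    return gimple_build_transaction (trans_body, over_label);
  }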
/* Build a GIMPLE_PREDICT statement. PREDICT is one of the predictors from
predict.def, OUTCOME is NOT_TAKEN or TAKEN. */
@@ -1319,9 +1341,11 @@ gimple_seq_copy (gimple_seq src)
/* Walk all the statements in the sequence SEQ calling walk_gimple_stmt
on each one. WI is as in walk_gimple_stmt.
- If walk_gimple_stmt returns non-NULL, the walk is stopped, the
- value is stored in WI->CALLBACK_RESULT and the statement that
- produced the value is returned.
+ If walk_gimple_stmt returns non-NULL, the walk is stopped, and the
+ value is stored in WI->CALLBACK_RESULT. Also, the statement that
+ produced the value is returned if this statement has not been
+ removed by a callback (wi->removed_stmt). If the statement has
+ been removed, NULL is returned.
Otherwise, all the statements are walked and NULL returned. */
@@ -1331,7 +1355,7 @@ walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
{
gimple_stmt_iterator gsi;
- for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
+ for (gsi = gsi_start (seq); !gsi_end_p (gsi); )
{
tree ret = walk_gimple_stmt (&gsi, callback_stmt, callback_op, wi);
if (ret)
@@ -1340,8 +1364,12 @@ walk_gimple_seq (gimple_seq seq, walk_stmt_fn callback_stmt,
to hold it. */
gcc_assert (wi);
wi->callback_result = ret;
- return gsi_stmt (gsi);
+
+ return wi->removed_stmt ? NULL : gsi_stmt (gsi);
}
+
+ if (!wi->removed_stmt)
+ gsi_next (&gsi);
}
if (wi)
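A hedged sketch of a walk_stmt_fn callback that uses the new removed_stmt flag; the callback name and the statement test are made up, but the protocol is the one implemented above: remove the statement through the iterator, set wi->removed_stmt, and walk_gimple_seq will not advance the iterator past the deleted statement.

  /* Illustrative only: delete every GIMPLE_NOP seen during the walk.  */
  static tree
  sketch_remove_nops (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                      struct walk_stmt_info *wi)
  {
    if (gimple_code (gsi_stmt (*gsi)) == GIMPLE_NOP)
      {
        gsi_remove (gsi, true);
        wi->removed_stmt = true;  /* Tell the walker not to call gsi_next.  */
      }
    *handled_ops_p = true;
    return NULL_TREE;
  }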
@@ -1680,6 +1708,13 @@ walk_gimple_op (gimple stmt, walk_tree_fn callback_op,
return ret;
break;
+ case GIMPLE_TRANSACTION:
+ ret = walk_tree (gimple_transaction_label_ptr (stmt), callback_op,
+ wi, pset);
+ if (ret)
+ return ret;
+ break;
+
/* Tuples that do not have operands. */
case GIMPLE_NOP:
case GIMPLE_RESX:
@@ -1730,10 +1765,13 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
gimple stmt = gsi_stmt (*gsi);
if (wi)
- wi->gsi = *gsi;
+ {
+ wi->gsi = *gsi;
+ wi->removed_stmt = false;
- if (wi && wi->want_locations && gimple_has_location (stmt))
- input_location = gimple_location (stmt);
+ if (wi->want_locations && gimple_has_location (stmt))
+ input_location = gimple_location (stmt);
+ }
ret = NULL;
@@ -1750,6 +1788,9 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
a value to return. */
gcc_assert (tree_ret == NULL);
+ if (wi && wi->removed_stmt)
+ return NULL;
+
/* Re-read stmt in case the callback changed it. */
stmt = gsi_stmt (*gsi);
}
@@ -1786,6 +1827,17 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
return wi->callback_result;
break;
+ case GIMPLE_EH_ELSE:
+ ret = walk_gimple_seq (gimple_eh_else_n_body (stmt),
+ callback_stmt, callback_op, wi);
+ if (ret)
+ return wi->callback_result;
+ ret = walk_gimple_seq (gimple_eh_else_e_body (stmt),
+ callback_stmt, callback_op, wi);
+ if (ret)
+ return wi->callback_result;
+ break;
+
case GIMPLE_TRY:
ret = walk_gimple_seq (gimple_try_eval (stmt), callback_stmt, callback_op,
wi);
@@ -1813,8 +1865,8 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
case GIMPLE_OMP_TASK:
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
- ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt, callback_op,
- wi);
+ ret = walk_gimple_seq (gimple_omp_body (stmt), callback_stmt,
+ callback_op, wi);
if (ret)
return wi->callback_result;
break;
@@ -1826,6 +1878,13 @@ walk_gimple_stmt (gimple_stmt_iterator *gsi, walk_stmt_fn callback_stmt,
return wi->callback_result;
break;
+ case GIMPLE_TRANSACTION:
+ ret = walk_gimple_seq (gimple_transaction_body (stmt),
+ callback_stmt, callback_op, wi);
+ if (ret)
+ return wi->callback_result;
+ break;
+
default:
gcc_assert (!gimple_has_substatements (stmt));
break;
@@ -2252,6 +2311,13 @@ gimple_copy (gimple stmt)
gimple_eh_filter_set_types (copy, t);
break;
+ case GIMPLE_EH_ELSE:
+ new_seq = gimple_seq_copy (gimple_eh_else_n_body (stmt));
+ gimple_eh_else_set_n_body (copy, new_seq);
+ new_seq = gimple_seq_copy (gimple_eh_else_e_body (stmt));
+ gimple_eh_else_set_e_body (copy, new_seq);
+ break;
+
case GIMPLE_TRY:
new_seq = gimple_seq_copy (gimple_try_eval (stmt));
gimple_try_set_eval (copy, new_seq);
@@ -2327,6 +2393,11 @@ gimple_copy (gimple stmt)
gimple_omp_set_body (copy, new_seq);
break;
+ case GIMPLE_TRANSACTION:
+ new_seq = gimple_seq_copy (gimple_transaction_body (stmt));
+ gimple_transaction_set_body (copy, new_seq);
+ break;
+
case GIMPLE_WITH_CLEANUP_EXPR:
new_seq = gimple_seq_copy (gimple_wce_cleanup (stmt));
gimple_wce_set_cleanup (copy, new_seq);
@@ -2782,37 +2853,6 @@ is_gimple_address (const_tree t)
}
}
-/* Strip out all handled components that produce invariant
- offsets. */
-
-static const_tree
-strip_invariant_refs (const_tree op)
-{
- while (handled_component_p (op))
- {
- switch (TREE_CODE (op))
- {
- case ARRAY_REF:
- case ARRAY_RANGE_REF:
- if (!is_gimple_constant (TREE_OPERAND (op, 1))
- || TREE_OPERAND (op, 2) != NULL_TREE
- || TREE_OPERAND (op, 3) != NULL_TREE)
- return NULL;
- break;
-
- case COMPONENT_REF:
- if (TREE_OPERAND (op, 2) != NULL_TREE)
- return NULL;
- break;
-
- default:;
- }
- op = TREE_OPERAND (op, 0);
- }
-
- return op;
-}
-
/* Return true if T is a gimple invariant address. */
bool
@@ -3075,21 +3115,6 @@ is_gimple_mem_ref_addr (tree t)
|| decl_address_invariant_p (TREE_OPERAND (t, 0)))));
}
-/* If T makes a function call, return the corresponding CALL_EXPR operand.
- Otherwise, return NULL_TREE. */
-
-tree
-get_call_expr_in (tree t)
-{
- if (TREE_CODE (t) == MODIFY_EXPR)
- t = TREE_OPERAND (t, 1);
- if (TREE_CODE (t) == WITH_SIZE_EXPR)
- t = TREE_OPERAND (t, 0);
- if (TREE_CODE (t) == CALL_EXPR)
- return t;
- return NULL_TREE;
-}
-
/* Given a memory reference expression T, return its base address.
The base address of a memory reference expression is the main
diff --git a/gcc/gimple.def b/gcc/gimple.def
index 2b5488ab0b6..5ae97025cb2 100644
--- a/gcc/gimple.def
+++ b/gcc/gimple.def
@@ -124,6 +124,14 @@ DEFGSCODE(GIMPLE_ASM, "gimple_asm", GSS_ASM)
CHAIN is the optional static chain link for nested functions. */
DEFGSCODE(GIMPLE_CALL, "gimple_call", GSS_CALL)
+/* GIMPLE_TRANSACTION <BODY, LABEL> represents __transaction_atomic and
+ __transaction_relaxed blocks.
+ BODY is the sequence of statements inside the transaction.
+   LABEL is a label for the statement immediately following the
+   transaction.  The code is defined before GIMPLE_RETURN so that the
+   statement is classified as having memory operands (MEM_OPS) and can
+   clobber global memory.  */
+DEFGSCODE(GIMPLE_TRANSACTION, "gimple_transaction", GSS_TRANSACTION)
+
/* GIMPLE_RETURN <RETVAL> represents return statements.
RETVAL is the value to return or NULL. If a value is returned it
@@ -151,6 +159,12 @@ DEFGSCODE(GIMPLE_EH_FILTER, "gimple_eh_filter", GSS_EH_FILTER)
be invoked if an exception propagates to this point. */
DEFGSCODE(GIMPLE_EH_MUST_NOT_THROW, "gimple_eh_must_not_throw", GSS_EH_MNT)
+/* GIMPLE_EH_ELSE <N_BODY, E_BODY> must be the sole contents of
+ a GIMPLE_TRY_FINALLY node. For all normal exits from the try block,
+ N_BODY is run; for all exception exits from the try block,
+ E_BODY is run. */
+DEFGSCODE(GIMPLE_EH_ELSE, "gimple_eh_else", GSS_EH_ELSE)
+
/* GIMPLE_RESX resumes execution after an exception. */
DEFGSCODE(GIMPLE_RESX, "gimple_resx", GSS_EH_CTRL)
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 666c44c8591..ffecc2617af 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -487,6 +487,15 @@ struct GTY(()) gimple_statement_eh_filter {
gimple_seq failure;
};
+/* GIMPLE_EH_ELSE */
+
+struct GTY(()) gimple_statement_eh_else {
+ /* [ WORD 1-4 ] */
+ struct gimple_statement_base gsbase;
+
+ /* [ WORD 5,6 ] */
+ gimple_seq n_body, e_body;
+};
/* GIMPLE_EH_MUST_NOT_THROW */
@@ -757,6 +766,43 @@ struct GTY(()) gimple_statement_omp_atomic_store {
tree val;
};
+/* GIMPLE_TRANSACTION. */
+
+/* Bits to be stored in the GIMPLE_TRANSACTION subcode. */
+
+/* The __transaction_atomic was declared [[outer]] or it is
+ __transaction_relaxed. */
+#define GTMA_IS_OUTER (1u << 0)
+#define GTMA_IS_RELAXED (1u << 1)
+#define GTMA_DECLARATION_MASK (GTMA_IS_OUTER | GTMA_IS_RELAXED)
+
+/* The transaction is seen to have an abort.  */
+#define GTMA_HAVE_ABORT (1u << 2)
+/* The transaction is seen to have loads or stores. */
+#define GTMA_HAVE_LOAD (1u << 3)
+#define GTMA_HAVE_STORE (1u << 4)
+/* The transaction MAY enter serial irrevocable mode in its dynamic scope. */
+#define GTMA_MAY_ENTER_IRREVOCABLE (1u << 5)
+/* The transaction WILL enter serial irrevocable mode.
+ An irrevocable block post-dominates the entire transaction, such
+ that all invocations of the transaction will go serial-irrevocable.
+   In that case, we don't bother instrumenting the transaction, and
+ tell the runtime that it should begin the transaction in
+ serial-irrevocable mode. */
+#define GTMA_DOES_GO_IRREVOCABLE (1u << 6)
+
+struct GTY(()) gimple_statement_transaction
+{
+ /* [ WORD 1-10 ] */
+ struct gimple_statement_with_memory_ops_base gsbase;
+
+ /* [ WORD 11 ] */
+ gimple_seq body;
+
+ /* [ WORD 12 ] */
+ tree label;
+};
+
#define DEFGSSTRUCT(SYM, STRUCT, HAS_TREE_OP) SYM,
enum gimple_statement_structure_enum {
#include "gsstruct.def"
@@ -779,6 +825,7 @@ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_st
struct gimple_statement_catch GTY ((tag ("GSS_CATCH"))) gimple_catch;
struct gimple_statement_eh_filter GTY ((tag ("GSS_EH_FILTER"))) gimple_eh_filter;
struct gimple_statement_eh_mnt GTY ((tag ("GSS_EH_MNT"))) gimple_eh_mnt;
+ struct gimple_statement_eh_else GTY ((tag ("GSS_EH_ELSE"))) gimple_eh_else;
struct gimple_statement_phi GTY ((tag ("GSS_PHI"))) gimple_phi;
struct gimple_statement_eh_ctrl GTY ((tag ("GSS_EH_CTRL"))) gimple_eh_ctrl;
struct gimple_statement_try GTY ((tag ("GSS_TRY"))) gimple_try;
@@ -793,6 +840,7 @@ union GTY ((desc ("gimple_statement_structure (&%h)"), variable_size)) gimple_st
struct gimple_statement_omp_continue GTY ((tag ("GSS_OMP_CONTINUE"))) gimple_omp_continue;
struct gimple_statement_omp_atomic_load GTY ((tag ("GSS_OMP_ATOMIC_LOAD"))) gimple_omp_atomic_load;
struct gimple_statement_omp_atomic_store GTY ((tag ("GSS_OMP_ATOMIC_STORE"))) gimple_omp_atomic_store;
+ struct gimple_statement_transaction GTY((tag ("GSS_TRANSACTION"))) gimple_transaction;
};
/* In gimple.c. */
@@ -846,6 +894,7 @@ gimple gimple_build_asm_vec (const char *, VEC(tree,gc) *, VEC(tree,gc) *,
gimple gimple_build_catch (tree, gimple_seq);
gimple gimple_build_eh_filter (tree, gimple_seq);
gimple gimple_build_eh_must_not_throw (tree);
+gimple gimple_build_eh_else (gimple_seq, gimple_seq);
gimple gimple_build_try (gimple_seq, gimple_seq, enum gimple_try_flags);
gimple gimple_build_wce (gimple_seq);
gimple gimple_build_resx (int);
@@ -868,6 +917,7 @@ gimple gimple_build_omp_single (gimple_seq, tree);
gimple gimple_build_cdt (tree, tree);
gimple gimple_build_omp_atomic_load (tree, tree);
gimple gimple_build_omp_atomic_store (tree);
+gimple gimple_build_transaction (gimple_seq, tree);
gimple gimple_build_predict (enum br_predictor, enum prediction);
enum gimple_statement_structure_enum gss_for_assign (enum tree_code);
void sort_case_labels (VEC(tree,heap) *);
@@ -963,8 +1013,6 @@ extern bool is_gimple_non_addressable (tree t);
/* Returns true iff T is a valid call address expression. */
extern bool is_gimple_call_addr (tree);
-/* If T makes a function call, returns the CALL_EXPR operand. */
-extern tree get_call_expr_in (tree t);
extern void recalculate_side_effects (tree);
extern bool gimple_compare_field_offset (tree, tree);
@@ -1076,6 +1124,9 @@ extern tree canonicalize_cond_expr_cond (tree);
/* In omp-low.c. */
extern tree omp_reduction_init (tree, tree);
+/* In trans-mem.c. */
+extern void diagnose_tm_safe_errors (tree);
+
/* In tree-nested.c. */
extern void lower_nested_functions (tree);
extern void insert_field_into_struct (tree, tree);
@@ -1134,6 +1185,7 @@ gimple_has_substatements (gimple g)
case GIMPLE_BIND:
case GIMPLE_CATCH:
case GIMPLE_EH_FILTER:
+ case GIMPLE_EH_ELSE:
case GIMPLE_TRY:
case GIMPLE_OMP_FOR:
case GIMPLE_OMP_MASTER:
@@ -1145,6 +1197,7 @@ gimple_has_substatements (gimple g)
case GIMPLE_OMP_SINGLE:
case GIMPLE_OMP_CRITICAL:
case GIMPLE_WITH_CLEANUP_EXPR:
+ case GIMPLE_TRANSACTION:
return true;
default:
@@ -3177,6 +3230,35 @@ gimple_eh_must_not_throw_set_fndecl (gimple gs, tree decl)
gs->gimple_eh_mnt.fndecl = decl;
}
+/* GIMPLE_EH_ELSE accessors. */
+
+static inline gimple_seq
+gimple_eh_else_n_body (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+ return gs->gimple_eh_else.n_body;
+}
+
+static inline gimple_seq
+gimple_eh_else_e_body (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+ return gs->gimple_eh_else.e_body;
+}
+
+static inline void
+gimple_eh_else_set_n_body (gimple gs, gimple_seq seq)
+{
+ GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+ gs->gimple_eh_else.n_body = seq;
+}
+
+static inline void
+gimple_eh_else_set_e_body (gimple gs, gimple_seq seq)
+{
+ GIMPLE_CHECK (gs, GIMPLE_EH_ELSE);
+ gs->gimple_eh_else.e_body = seq;
+}
/* GIMPLE_TRY accessors. */
@@ -4555,6 +4637,67 @@ gimple_omp_continue_set_control_use (gimple g, tree use)
g->gimple_omp_continue.control_use = use;
}
+/* Return the body for the GIMPLE_TRANSACTION statement GS. */
+
+static inline gimple_seq
+gimple_transaction_body (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ return gs->gimple_transaction.body;
+}
+
+/* Return the label associated with a GIMPLE_TRANSACTION. */
+
+static inline tree
+gimple_transaction_label (const_gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ return gs->gimple_transaction.label;
+}
+
+static inline tree *
+gimple_transaction_label_ptr (gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ return &gs->gimple_transaction.label;
+}
+
+/* Return the subcode associated with a GIMPLE_TRANSACTION. */
+
+static inline unsigned int
+gimple_transaction_subcode (const_gimple gs)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ return gs->gsbase.subcode;
+}
+
+/* Set BODY to be the body for the GIMPLE_TRANSACTION statement GS. */
+
+static inline void
+gimple_transaction_set_body (gimple gs, gimple_seq body)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ gs->gimple_transaction.body = body;
+}
+
+/* Set the label associated with a GIMPLE_TRANSACTION. */
+
+static inline void
+gimple_transaction_set_label (gimple gs, tree label)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ gs->gimple_transaction.label = label;
+}
+
+/* Set the subcode associated with a GIMPLE_TRANSACTION. */
+
+static inline void
+gimple_transaction_set_subcode (gimple gs, unsigned int subcode)
+{
+ GIMPLE_CHECK (gs, GIMPLE_TRANSACTION);
+ gs->gsbase.subcode = subcode;
+}
+
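By way of illustration (hypothetical helper, not part of the patch), a pass might use the accessors above to record a store inside transaction STMT and query how the transaction was declared:

  static bool
  sketch_note_store (gimple stmt)
  {
    unsigned int subcode = gimple_transaction_subcode (stmt);

    /* Remember that the transaction body contains a store.  */
    gimple_transaction_set_subcode (stmt, subcode | GTMA_HAVE_STORE);

    /* Report whether it was written as __transaction_relaxed.  */
    return (subcode & GTMA_IS_RELAXED) != 0;
  }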
/* Return a pointer to the return value for GIMPLE_RETURN GS. */
@@ -4981,6 +5124,12 @@ struct walk_stmt_info
will be visited more than once. */
struct pointer_set_t *pset;
+ /* Operand returned by the callbacks. This is set when calling
+ walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
+ returns non-NULL, this field will contain the tree returned by
+ the last callback. */
+ tree callback_result;
+
/* Indicates whether the operand being examined may be replaced
with something that matches is_gimple_val (if true) or something
slightly more complicated (if false). "Something" technically
@@ -4993,23 +5142,20 @@ struct walk_stmt_info
statement 'foo (&var)', the flag VAL_ONLY will initially be set
to true, however, when walking &var, the operand of that
ADDR_EXPR does not need to be a GIMPLE value. */
- bool val_only;
+ BOOL_BITFIELD val_only : 1;
/* True if we are currently walking the LHS of an assignment. */
- bool is_lhs;
+ BOOL_BITFIELD is_lhs : 1;
/* Optional. Set to true by the callback functions if they made any
changes. */
- bool changed;
+ BOOL_BITFIELD changed : 1;
/* True if we're interested in location information. */
- bool want_locations;
+ BOOL_BITFIELD want_locations : 1;
- /* Operand returned by the callbacks. This is set when calling
- walk_gimple_seq. If the walk_stmt_fn or walk_tree_fn callback
- returns non-NULL, this field will contain the tree returned by
- the last callback. */
- tree callback_result;
+ /* True if we've removed the statement that was processed. */
+ BOOL_BITFIELD removed_stmt : 1;
};
/* Callback for walk_gimple_stmt. Called for every statement found
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 8c2c5ac2c9c..99e0d0dad90 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -413,6 +413,8 @@ create_tmp_var_name (const char *prefix)
char *preftmp = ASTRDUP (prefix);
remove_suffix (preftmp, strlen (preftmp));
+ clean_symbol_name (preftmp);
+
prefix = preftmp;
}
@@ -1072,6 +1074,12 @@ voidify_wrapper_expr (tree wrapper, tree temp)
}
break;
+ case TRANSACTION_EXPR:
+ TREE_SIDE_EFFECTS (*p) = 1;
+ TREE_TYPE (*p) = void_type_node;
+ p = &TRANSACTION_EXPR_BODY (*p);
+ break;
+
default:
goto out;
}
@@ -6527,6 +6535,53 @@ gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
return GS_ALL_DONE;
}
+/* Gimplify a TRANSACTION_EXPR. This involves gimplification of the
+ body, and adding some EH bits. */
+
+static enum gimplify_status
+gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
+{
+ tree expr = *expr_p, temp, tbody = TRANSACTION_EXPR_BODY (expr);
+ gimple g;
+ gimple_seq body = NULL;
+ struct gimplify_ctx gctx;
+ int subcode = 0;
+
+ /* Wrap the transaction body in a BIND_EXPR so we have a context
+     in which to put decls for OpenMP.  */
+ if (TREE_CODE (tbody) != BIND_EXPR)
+ {
+ tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
+ TREE_SIDE_EFFECTS (bind) = 1;
+ SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
+ TRANSACTION_EXPR_BODY (expr) = bind;
+ }
+
+ push_gimplify_context (&gctx);
+ temp = voidify_wrapper_expr (*expr_p, NULL);
+
+ g = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
+ pop_gimplify_context (g);
+
+ g = gimple_build_transaction (body, NULL);
+ if (TRANSACTION_EXPR_OUTER (expr))
+ subcode = GTMA_IS_OUTER;
+ else if (TRANSACTION_EXPR_RELAXED (expr))
+ subcode = GTMA_IS_RELAXED;
+ gimple_transaction_set_subcode (g, subcode);
+
+ gimplify_seq_add_stmt (pre_p, g);
+
+ if (temp)
+ {
+ *expr_p = temp;
+ return GS_OK;
+ }
+
+ *expr_p = NULL_TREE;
+ return GS_ALL_DONE;
+}
+
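In terms of source constructs, the subcode recorded here maps as follows (a rough correspondence; the body is gimplified separately):

  __transaction_atomic           { ... }   ->  subcode 0
  __transaction_atomic [[outer]] { ... }   ->  subcode GTMA_IS_OUTER
  __transaction_relaxed          { ... }   ->  subcode GTMA_IS_RELAXED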
/* Convert the GENERIC expression tree *EXPR_P to GIMPLE. If the
expression produces a value to be used as an operand inside a GIMPLE
statement, the value will be stored back in *EXPR_P. This value will
@@ -7251,6 +7306,10 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
+ case TRANSACTION_EXPR:
+ ret = gimplify_transaction (expr_p, pre_p);
+ break;
+
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
diff --git a/gcc/gsstruct.def b/gcc/gsstruct.def
index 0b6531e6ca4..0e5727f2d98 100644
--- a/gcc/gsstruct.def
+++ b/gcc/gsstruct.def
@@ -38,6 +38,7 @@ DEFGSSTRUCT(GSS_CATCH, gimple_statement_catch, false)
DEFGSSTRUCT(GSS_EH_FILTER, gimple_statement_eh_filter, false)
DEFGSSTRUCT(GSS_EH_MNT, gimple_statement_eh_mnt, false)
DEFGSSTRUCT(GSS_EH_CTRL, gimple_statement_eh_ctrl, false)
+DEFGSSTRUCT(GSS_EH_ELSE, gimple_statement_eh_else, false)
DEFGSSTRUCT(GSS_WCE, gimple_statement_wce, false)
DEFGSSTRUCT(GSS_OMP, gimple_statement_omp, false)
DEFGSSTRUCT(GSS_OMP_CRITICAL, gimple_statement_omp_critical, false)
@@ -49,3 +50,4 @@ DEFGSSTRUCT(GSS_OMP_SINGLE, gimple_statement_omp_single, false)
DEFGSSTRUCT(GSS_OMP_CONTINUE, gimple_statement_omp_continue, false)
DEFGSSTRUCT(GSS_OMP_ATOMIC_LOAD, gimple_statement_omp_atomic_load, false)
DEFGSSTRUCT(GSS_OMP_ATOMIC_STORE, gimple_statement_omp_atomic_store, false)
+DEFGSSTRUCT(GSS_TRANSACTION, gimple_statement_transaction, false)
diff --git a/gcc/gtm-builtins.def b/gcc/gtm-builtins.def
new file mode 100644
index 00000000000..9fcbdb0cc36
--- /dev/null
+++ b/gcc/gtm-builtins.def
@@ -0,0 +1,208 @@
+DEF_TM_BUILTIN (BUILT_IN_TM_START, "_ITM_beginTransaction",
+ BT_FN_UINT_UINT, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_COMMIT, "_ITM_commitTransaction",
+ BT_FN_VOID, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_COMMIT_EH, "_ITM_commitTransactionEH",
+ BT_FN_VOID_PTR, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_ABORT, "_ITM_abortTransaction",
+ BT_FN_INT, ATTR_TM_NORETURN_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_IRREVOCABLE, "_ITM_changeTransactionMode",
+ BT_FN_INT_INT, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMCPY, "_ITM_memcpyRtWt",
+ BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMMOVE, "_ITM_memmoveRtWt",
+ BT_FN_PTR_PTR_CONST_PTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_MEMSET, "_ITM_memsetW",
+ BT_FN_PTR_PTR_INT_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_GETTMCLONE_IRR, "_ITM_getTMCloneOrIrrevocable",
+ BT_FN_PTR_PTR, ATTR_TM_CONST_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_GETTMCLONE_SAFE, "_ITM_getTMCloneSafe",
+ BT_FN_PTR_PTR, ATTR_TM_CONST_NOTHROW_LIST)
+
+/* Memory allocation builtins. */
+DEF_TM_BUILTIN (BUILT_IN_TM_MALLOC, "_ITM_malloc",
+ BT_FN_PTR_SIZE, ATTR_TMPURE_MALLOC_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_CALLOC, "_ITM_calloc",
+ BT_FN_PTR_SIZE_SIZE, ATTR_TMPURE_MALLOC_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_FREE, "_ITM_free",
+ BT_FN_VOID_PTR, ATTR_TMPURE_NOTHROW_LIST)
+
+/* Logging builtins. */
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_1, "_ITM_LU1",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_2, "_ITM_LU2",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_4, "_ITM_LU4",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_8, "_ITM_LU8",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_FLOAT, "_ITM_LF",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_DOUBLE, "_ITM_LD",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG_LDOUBLE, "_ITM_LE",
+ BT_FN_VOID_VPTR, ATTR_TM_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOG, "_ITM_LB",
+ BT_FN_VOID_VPTR_SIZE, ATTR_TM_TMPURE_NOTHROW_LIST)
+
+/* These stubs should get defined in the backend if applicable. */
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOG_M64, "__builtin__ITM_LM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOG_M128, "__builtin__ITM_LM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOG_M256, "__builtin__ITM_LM256")
+
+/* Writes.
+
+   Note: The writes must appear in the following order: STORE, WAR, WAW.
+   The TM optimizations depend on this order.
+
+   BUILT_IN_TM_STORE_1 must be the first of the TM load/store builtins.
+   BUILTIN_TM_LOAD_STORE_P depends on this.  */
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_1, "_ITM_WU1",
+ BT_FN_VOID_VPTR_I1, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_1, "_ITM_WaRU1",
+ BT_FN_VOID_VPTR_I1, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_1, "_ITM_WaWU1",
+ BT_FN_VOID_VPTR_I1, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_2, "_ITM_WU2",
+ BT_FN_VOID_VPTR_I2, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_2, "_ITM_WaRU2",
+ BT_FN_VOID_VPTR_I2, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_2, "_ITM_WaWU2",
+ BT_FN_VOID_VPTR_I2, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_4, "_ITM_WU4",
+ BT_FN_VOID_VPTR_I4, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_4, "_ITM_WaRU4",
+ BT_FN_VOID_VPTR_I4, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_4, "_ITM_WaWU4",
+ BT_FN_VOID_VPTR_I4, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_8, "_ITM_WU8",
+ BT_FN_VOID_VPTR_I8, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_8, "_ITM_WaRU8",
+ BT_FN_VOID_VPTR_I8, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_8, "_ITM_WaWU8",
+ BT_FN_VOID_VPTR_I8, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_FLOAT, "_ITM_WF",
+ BT_FN_VOID_VPTR_FLOAT, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_FLOAT, "_ITM_WaRF",
+ BT_FN_VOID_VPTR_FLOAT, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_FLOAT, "_ITM_WaWF",
+ BT_FN_VOID_VPTR_FLOAT, ATTR_TM_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_DOUBLE, "_ITM_WD",
+ BT_FN_VOID_VPTR_DOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_DOUBLE, "_ITM_WaRD",
+ BT_FN_VOID_VPTR_DOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_DOUBLE, "_ITM_WaWD",
+ BT_FN_VOID_VPTR_DOUBLE, ATTR_TM_NOTHROW_LIST)
+
+/* These stubs should get defined in the backend if applicable. */
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_M64, "__builtin__ITM_WM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAR_M64, "__builtin__ITM_WaRM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAW_M64, "__builtin__ITM_WaWM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_M128, "__builtin__ITM_WM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAR_M128, "__builtin__ITM_WaRM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAW_M128, "__builtin__ITM_WaWM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_M256, "__builtin__ITM_WM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAR_M256, "__builtin__ITM_WaRM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_STORE_WAW_M256, "__builtin__ITM_WaWM256")
+
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_LDOUBLE, "_ITM_WE",
+ BT_FN_VOID_VPTR_LDOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAR_LDOUBLE, "_ITM_WaRE",
+ BT_FN_VOID_VPTR_LDOUBLE, ATTR_TM_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_STORE_WAW_LDOUBLE, "_ITM_WaWE",
+ BT_FN_VOID_VPTR_LDOUBLE, ATTR_TM_NOTHROW_LIST)
+/* Note: BUILT_IN_TM_STORE_WAW_LDOUBLE must be the last TM store.
+ BUILTIN_TM_STORE_P depends on this. */
+
+/* Reads.
+
+   Note: The reads must appear in the following order: LOAD, RAR, RAW, RFW.
+ The TM optimizations depend on this order. */
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_1, "_ITM_RU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_1, "_ITM_RaRU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_1, "_ITM_RaWU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_1, "_ITM_RfWU1",
+ BT_FN_I1_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_2, "_ITM_RU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_2, "_ITM_RaRU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_2, "_ITM_RaWU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_2, "_ITM_RfWU2",
+ BT_FN_I2_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_4, "_ITM_RU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_4, "_ITM_RaRU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_4, "_ITM_RaWU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_4, "_ITM_RfWU4",
+ BT_FN_I4_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_8, "_ITM_RU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_8, "_ITM_RaRU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_8, "_ITM_RaWU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_8, "_ITM_RfWU8",
+ BT_FN_I8_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_FLOAT, "_ITM_RF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_FLOAT, "_ITM_RaRF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_FLOAT, "_ITM_RaWF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_FLOAT, "_ITM_RfWF",
+ BT_FN_FLOAT_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_DOUBLE, "_ITM_RD",
+ BT_FN_DOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_DOUBLE, "_ITM_RaRD",
+ BT_FN_DOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_DOUBLE, "_ITM_RaWD",
+ BT_FN_DOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_DOUBLE, "_ITM_RfWD",
+ BT_FN_DOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+/* These stubs should get defined in the backend if applicable. */
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_M64, "__builtin__ITM_RM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAR_M64, "__builtin__ITM_RaRM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAW_M64, "__builtin__ITM_RaWM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RFW_M64, "__builtin__ITM_RfWM64")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_M128, "__builtin__ITM_RM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAR_M128, "__builtin__ITM_RaRM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAW_M128, "__builtin__ITM_RaWM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RFW_M128, "__builtin__ITM_RfWM128")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_M256, "__builtin__ITM_RM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAR_M256, "__builtin__ITM_RaRM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RAW_M256, "__builtin__ITM_RaWM256")
+DEF_BUILTIN_STUB (BUILT_IN_TM_LOAD_RFW_M256, "__builtin__ITM_RfWM256")
+
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_LDOUBLE, "_ITM_RE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAR_LDOUBLE, "_ITM_RaRE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RAW_LDOUBLE, "_ITM_RaWE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+DEF_TM_BUILTIN (BUILT_IN_TM_LOAD_RFW_LDOUBLE, "_ITM_RfWE",
+ BT_FN_LDOUBLE_VPTR, ATTR_TM_PURE_TMPURE_NOTHROW_LIST)
+
+/* Note: BUILT_IN_TM_LOAD_RFW_LDOUBLE must be the last TM load as well
+ as the last builtin. BUILTIN_TM_LOAD_STORE_P and BUILTIN_TM_LOAD_P
+ depend on this. */
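The ordering constraints above make classification possible with simple range checks on the built-in function code.  A sketch of such predicates, assuming only the ordering documented in this file (the tree's real BUILTIN_TM_* macros live elsewhere and may differ in detail):

  /* Illustrative only.  */
  static inline bool
  sketch_tm_store_code_p (enum built_in_function fcode)
  {
    return (fcode >= BUILT_IN_TM_STORE_1
            && fcode <= BUILT_IN_TM_STORE_WAW_LDOUBLE);
  }

  static inline bool
  sketch_tm_load_store_code_p (enum built_in_function fcode)
  {
    return (fcode >= BUILT_IN_TM_STORE_1
            && fcode <= BUILT_IN_TM_LOAD_RFW_LDOUBLE);
  }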
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 31c88e501e7..3dadf8d12d7 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -284,6 +284,14 @@ can_inline_edge_p (struct cgraph_edge *e, bool report)
e->inline_failed = CIF_EH_PERSONALITY;
inlinable = false;
}
+ /* TM pure functions should not get inlined if the outer function is
+ a TM safe function. */
+ else if (is_tm_pure (callee->decl)
+ && is_tm_safe (e->caller->decl))
+ {
+ e->inline_failed = CIF_UNSPECIFIED;
+ inlinable = false;
+ }
/* Don't inline if the callee can throw non-call exceptions but the
caller cannot.
FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
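A minimal source-level illustration of the case the new check rejects (hypothetical function names); with -fgnu-tm, the call below must remain a call rather than being inlined into the transaction_safe caller:

  __attribute__((transaction_pure)) extern int counter (void);

  __attribute__((transaction_safe)) int
  f (void)
  {
    return counter ();
  }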
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 81459577937..dc61c0bdf54 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -139,6 +139,7 @@ static tree scan_omp_1_op (tree *, int *, void *);
case GIMPLE_TRY: \
case GIMPLE_CATCH: \
case GIMPLE_EH_FILTER: \
+ case GIMPLE_TRANSACTION: \
/* The sub-statements for these should be walked. */ \
*handled_ops_p = false; \
break;
diff --git a/gcc/opts.c b/gcc/opts.c
index 3153fe50274..9fdb22631d0 100644
--- a/gcc/opts.c
+++ b/gcc/opts.c
@@ -784,6 +784,8 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
#endif
if (!opts->x_flag_fat_lto_objects && !HAVE_LTO_PLUGIN)
error_at (loc, "-fno-fat-lto-objects are supported only with linker plugin.");
+ if (opts->x_flag_tm)
+ error_at (loc, "LTO is currently not supported with transactional memory");
}
if ((opts->x_flag_lto_partition_balanced != 0) + (opts->x_flag_lto_partition_1to1 != 0)
+ (opts->x_flag_lto_partition_none != 0) >= 1)
diff --git a/gcc/output.h b/gcc/output.h
index 661b623807c..e47eddf2735 100644
--- a/gcc/output.h
+++ b/gcc/output.h
@@ -606,6 +606,10 @@ extern bool unlikely_text_section_p (section *);
extern void switch_to_section (section *);
extern void output_section_asm_op (const void *);
+extern void record_tm_clone_pair (tree, tree);
+extern void finish_tm_clone_pairs (void);
+extern tree get_tm_clone_pair (tree);
+
extern void default_asm_output_source_filename (FILE *, const char *);
extern void output_file_directive (FILE *, const char *);
diff --git a/gcc/params.def b/gcc/params.def
index a7ae0918579..239b684b5fc 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -872,6 +872,13 @@ DEFPARAM (PARAM_IPA_SRA_PTR_GROWTH_FACTOR,
"a pointer to an aggregate with",
2, 0, 0)
+DEFPARAM (PARAM_TM_MAX_AGGREGATE_SIZE,
+ "tm-max-aggregate-size",
+ "Size in bytes after which thread-local aggregates should be "
+ "instrumented with the logging functions instead of save/restore "
+ "pairs",
+ 9, 0, 0)
+
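The new threshold is adjustable on the command line in the usual way, for example (value chosen arbitrarily):

  gcc -fgnu-tm -O2 --param tm-max-aggregate-size=64 foo.c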
DEFPARAM (PARAM_IPA_CP_VALUE_LIST_SIZE,
"ipa-cp-value-list-size",
"Maximum size of a list of values associated with each parameter for "
diff --git a/gcc/passes.c b/gcc/passes.c
index 887007f6dde..a3512419ee2 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -1174,9 +1174,11 @@ init_optimization_passes (void)
p = &all_lowering_passes;
NEXT_PASS (pass_warn_unused_result);
NEXT_PASS (pass_diagnose_omp_blocks);
+ NEXT_PASS (pass_diagnose_tm_blocks);
NEXT_PASS (pass_mudflap_1);
NEXT_PASS (pass_lower_omp);
NEXT_PASS (pass_lower_cf);
+ NEXT_PASS (pass_lower_tm);
NEXT_PASS (pass_refactor_eh);
NEXT_PASS (pass_lower_eh);
NEXT_PASS (pass_build_cfg);
@@ -1241,6 +1243,7 @@ init_optimization_passes (void)
}
NEXT_PASS (pass_ipa_increase_alignment);
NEXT_PASS (pass_ipa_matrix_reorg);
+ NEXT_PASS (pass_ipa_tm);
NEXT_PASS (pass_ipa_lower_emutls);
*p = NULL;
@@ -1400,6 +1403,13 @@ init_optimization_passes (void)
NEXT_PASS (pass_uncprop);
NEXT_PASS (pass_local_pure_const);
}
+ NEXT_PASS (pass_tm_init);
+ {
+ struct opt_pass **p = &pass_tm_init.pass.sub;
+ NEXT_PASS (pass_tm_mark);
+ NEXT_PASS (pass_tm_memopt);
+ NEXT_PASS (pass_tm_edges);
+ }
NEXT_PASS (pass_lower_complex_O0);
NEXT_PASS (pass_cleanup_eh);
NEXT_PASS (pass_lower_resx);
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index 1a1e33f1342..7fb71d0b475 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -424,6 +424,8 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
fputs (" built-in", file);
if (code == FUNCTION_DECL && DECL_STATIC_CHAIN (node))
fputs (" static-chain", file);
+ if (TREE_CODE (node) == FUNCTION_DECL && decl_is_tm_clone (node))
+ fputs (" tm-clone", file);
if (code == FIELD_DECL && DECL_PACKED (node))
fputs (" packed", file);
diff --git a/gcc/recog.c b/gcc/recog.c
index d3ecb73c4e8..ae05204eee5 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -3287,6 +3287,7 @@ peep2_attempt (basic_block bb, rtx insn, int match_len, rtx attempt)
{
case REG_NORETURN:
case REG_SETJMP:
+ case REG_TM:
add_reg_note (new_insn, REG_NOTE_KIND (note),
XEXP (note, 0));
break;
diff --git a/gcc/reg-notes.def b/gcc/reg-notes.def
index d103afee018..f2f097385a6 100644
--- a/gcc/reg-notes.def
+++ b/gcc/reg-notes.def
@@ -203,6 +203,11 @@ REG_NOTE (CROSSING_JUMP)
functions that can return twice. */
REG_NOTE (SETJMP)
+/* This kind of note is generated at each transactional memory
+ builtin, to indicate we need to generate transaction restart
+ edges for this insn. */
+REG_NOTE (TM)
+
/* Indicates the cumulative offset of the stack pointer accounting
for pushed arguments. This will only be generated when
ACCUMULATE_OUTGOING_ARGS is false. */
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index 54d9eb1ef67..7c4a49bef09 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -1918,6 +1918,7 @@ alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
case REG_CC_USER:
case REG_LABEL_TARGET:
case REG_LABEL_OPERAND:
+ case REG_TM:
/* These types of register notes use an INSN_LIST rather than an
EXPR_LIST, so that copying is done right and dumps look
better. */
diff --git a/gcc/target.def b/gcc/target.def
index a83088d2900..62bd06e976f 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -1003,6 +1003,24 @@ DEFHOOK
(enum machine_mode mode, const_tree type, int misalignment, bool is_packed),
default_builtin_support_vector_misalignment)
+/* Return the builtin decl needed to load a vector of TYPE. */
+DEFHOOK
+(builtin_tm_load,
+ "This hook should return the built-in decl needed to load a vector of the "
+ "given type within a transaction.",
+ tree,
+ (tree),
+ default_builtin_tm_load_store)
+
+/* Return the builtin decl needed to store a vector of TYPE. */
+DEFHOOK
+(builtin_tm_store,
+ "This hook should return the built-in decl needed to store a vector of the "
+ "given type within a transaction.",
+ tree,
+ (tree),
+ default_builtin_tm_load_store)
+
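A backend that provides vector TM load/store builtins would implement these hooks along the following lines; this is only a sketch, and example_get_builtin_decl together with EXAMPLE_BUILTIN_TM_LOAD_M128 stand in for whatever builtin table the backend actually uses:

  /* Hypothetical implementation of the builtin_tm_load hook.  */
  static tree
  example_builtin_tm_load (tree type)
  {
    if (TREE_CODE (type) == VECTOR_TYPE
        && tree_low_cst (TYPE_SIZE (type), 1) == 128)
      return example_get_builtin_decl (EXAMPLE_BUILTIN_TM_LOAD_M128);

    /* Otherwise behave like the default hook: no special TM load.  */
    return NULL_TREE;
  }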
/* Returns the preferred mode for SIMD operations for the specified
scalar mode. */
DEFHOOK
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 81fd12f7b78..2b4fd27bdfa 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -1214,6 +1214,12 @@ default_have_conditional_execution (void)
#endif
}
+tree
+default_builtin_tm_load_store (tree ARG_UNUSED (type))
+{
+ return NULL_TREE;
+}
+
/* Compute cost of moving registers to/from memory. */
int
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index f19fb506bf5..861811543f8 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -152,6 +152,9 @@ extern bool default_addr_space_subset_p (addr_space_t, addr_space_t);
extern rtx default_addr_space_convert (rtx, tree, tree);
extern unsigned int default_case_values_threshold (void);
extern bool default_have_conditional_execution (void);
+
+extern tree default_builtin_tm_load_store (tree);
+
extern int default_memory_move_cost (enum machine_mode, reg_class_t, bool);
extern int default_register_move_cost (enum machine_mode, reg_class_t,
reg_class_t);
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index a62b11cbebf..c095293b347 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,14 @@
+2011-11-07 Richard Henderson <rth@redhat.com>
+ Aldy Hernandez <aldyh@redhat.com>
+ Torvald Riegel <triegel@redhat.com>
+
+ Merged from transactional-memory.
+
+ * g++.dg/dg.exp: Run transactional memory tests.
+ * g++.dg/tm: New directory with new tests.
+ * gcc.dg/tm: New directory with new tests.
+ * c-c++-common/tm: New directory with new tests.
+
2011-11-08 Dodji Seketeli <dodji@redhat.com>
Fix context handling of alias-declaration
diff --git a/gcc/testsuite/c-c++-common/tm/20100127.c b/gcc/testsuite/c-c++-common/tm/20100127.c
new file mode 100644
index 00000000000..c25336d682a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/20100127.c
@@ -0,0 +1,36 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+/* Test that `nontrxn' doesn't end up inside the transaction. */
+
+typedef struct node {
+ int * val;
+ struct node *next;
+} node_t;
+
+node_t *next;
+int nontrxn1, nontrxn;
+
+static int set_remove(int * val)
+{
+ int result;
+ int * v;
+ __transaction_relaxed {
+ v = next->val;
+ result = (v == val);
+ if (result)
+ result = 2;
+ }
+ return result;
+}
+
+void test(void *data)
+{
+ extern void bark(void);
+ if (set_remove(0))
+ bark();
+ nontrxn = 99; /* Should be outside transaction. */
+}
+
+/* { dg-final { scan-tree-dump-times "_ITM_W.*nontrxn" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/c-c++-common/tm/abort-1.c b/gcc/testsuite/c-c++-common/tm/abort-1.c
new file mode 100644
index 00000000000..90830f41cf3
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/abort-1.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+
+void f(void)
+{
+ __transaction_cancel; /* { dg-error "without transactional" } */
+}
diff --git a/gcc/testsuite/c-c++-common/tm/abort-2.c b/gcc/testsuite/c-c++-common/tm/abort-2.c
new file mode 100644
index 00000000000..727c63432ac
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/abort-2.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+int g;
+void f(void)
+{
+ __transaction_atomic {
+ if (g == 0)
+ __transaction_cancel;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/abort-3.c b/gcc/testsuite/c-c++-common/tm/abort-3.c
new file mode 100644
index 00000000000..f2cf5b5dc2a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/abort-3.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+
+void f(void)
+{
+ __transaction_atomic { /* { dg-error "__transaction_atomic. without trans" } */
+ __transaction_cancel; /* { dg-error "_cancel. without trans" } */
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/atomic-1.c b/gcc/testsuite/c-c++-common/tm/atomic-1.c
new file mode 100644
index 00000000000..e301f1f48cf
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/atomic-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+
+int g;
+void f(void)
+{
+ __transaction_atomic { /* { dg-error "without transactional memory" } */
+ g++;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/atomic-2.c b/gcc/testsuite/c-c++-common/tm/atomic-2.c
new file mode 100644
index 00000000000..f232766240a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/atomic-2.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+int g;
+void f(void)
+{
+ __transaction_atomic {
+ g++;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/attrib-1.c b/gcc/testsuite/c-c++-common/tm/attrib-1.c
new file mode 100644
index 00000000000..536aeb33fdd
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/attrib-1.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+#define TC __attribute__((transaction_callable))
+#define TU __attribute__((transaction_unsafe))
+#define TP __attribute__((transaction_pure))
+#define TS __attribute__((transaction_safe))
+extern void f1(void) TC;
+extern void f2(void) TU;
+extern void f3(void) TP;
+extern void f4(void) TS;
+
+extern void g1(void) TC TS; /* { dg-error "previously declared" } */
+
+extern int v1 TP; /* { dg-warning "ignored" } */
+
+typedef void t1(void) TC;
+typedef void (*t2)(void) TC;
+typedef int t3 TC; /* { dg-warning "ignored" } */
+
+typedef void u0(void);
+typedef u0 u1 TC;
+typedef u1 u2 TP; /* { dg-error "previously declared" } */
+typedef u0 *u3 TS;
+typedef u3 u4 TU; /* { dg-error "previously declared" } */
diff --git a/gcc/testsuite/c-c++-common/tm/cancel-1.c b/gcc/testsuite/c-c++-common/tm/cancel-1.c
new file mode 100644
index 00000000000..6d60f2648c4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/cancel-1.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void unsafe(void) __attribute__((transaction_unsafe));
+
+void
+f(void)
+{
+ int a;
+ __transaction_atomic {
+ a = 1;
+ __transaction_atomic {
+ __transaction_cancel;
+ }
+ }
+ unsafe();
+}
diff --git a/gcc/testsuite/c-c++-common/tm/freq.c b/gcc/testsuite/c-c++-common/tm/freq.c
new file mode 100644
index 00000000000..31df167fae9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/freq.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+extern __attribute__((transaction_safe)) void TMreleaseNode ();
+
+int global;
+
+__attribute__((transaction_safe))
+void
+TMrbtree_insert ()
+{
+ if (global)
+ TMreleaseNode();
+}
diff --git a/gcc/testsuite/c-c++-common/tm/inline-asm-2.c b/gcc/testsuite/c-c++-common/tm/inline-asm-2.c
new file mode 100644
index 00000000000..7d429fbbc85
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/inline-asm-2.c
@@ -0,0 +1,8 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+__attribute__((transaction_callable))
+void func()
+{
+ __asm__ ("");
+}
diff --git a/gcc/testsuite/c-c++-common/tm/inline-asm.c b/gcc/testsuite/c-c++-common/tm/inline-asm.c
new file mode 100644
index 00000000000..eefd347dc8f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/inline-asm.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O1" } */
+
+static inline void
+inline_death ()
+{
+ __asm__ (""); /* { dg-error "asm not allowed" } */
+}
+
+void
+tranfunction ()
+{
+ __transaction_atomic
+ {
+ inline_death ();
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/ipa-1.c b/gcc/testsuite/c-c++-common/tm/ipa-1.c
new file mode 100644
index 00000000000..961f7fe79c0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/ipa-1.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-ipa-tmipa" } */
+
+int val, george;
+
+extern void func();
+
+int set_remove(void)
+{
+ int result = 8;
+ __transaction_atomic {
+ result = george;
+ if (val)
+ goto out;
+ }
+ out:
+ func();
+ return result;
+}
+
+
+/* { dg-final { scan-ipa-dump-not "getTMCloneOrIrrevocable" "tmipa" } } */
+/* { dg-final { cleanup-ipa-dump "tmipa" } } */
diff --git a/gcc/testsuite/c-c++-common/tm/malloc.c b/gcc/testsuite/c-c++-common/tm/malloc.c
new file mode 100644
index 00000000000..de7a766e36c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/malloc.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark" } */
+
+#include <stdlib.h>
+
+char *z;
+
+void foobar(void)
+{
+ char *p, *q;
+ __transaction_atomic {
+ p = (char *)malloc(123);
+ q = (char *)calloc(555,1);
+ free(q);
+ free(p);
+ }
+ z = (char *)malloc (666);
+}
+
+/* { dg-final { scan-tree-dump-times " malloc .666" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "__builtin__ITM_malloc" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "__builtin__ITM_calloc" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "__builtin__ITM_free" 2 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/c-c++-common/tm/memcpy-1.c b/gcc/testsuite/c-c++-common/tm/memcpy-1.c
new file mode 100644
index 00000000000..fa841b26164
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/memcpy-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+#include <string.h>
+
+__attribute__((transaction_safe))
+void *wmemcpy(void *dest, const void *src, size_t n)
+{
+ return memcpy(dest, src, n);
+}
diff --git a/gcc/testsuite/c-c++-common/tm/omp.c b/gcc/testsuite/c-c++-common/tm/omp.c
new file mode 100644
index 00000000000..b9fcc765efc
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/omp.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fopenmp" } */
+
+__attribute__ ((transaction_pure))
+unsigned long rdtsc();
+
+typedef struct ENTER_EXIT_TIMES
+{
+ unsigned long enter;
+} times_t;
+
+void ParClassify()
+{
+ void * Parent;
+#pragma omp parallel private(Parent)
+ {
+ times_t inside;
+ __transaction_atomic {
+ inside.enter = rdtsc();
+ }
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/outer-1.c b/gcc/testsuite/c-c++-common/tm/outer-1.c
new file mode 100644
index 00000000000..7dbf2e8310a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/outer-1.c
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void mco(void) __attribute__((transaction_may_cancel_outer));
+
+void
+f(void)
+{
+ mco(); /* { dg-error "" } */
+ __transaction_atomic {
+ mco(); /* { dg-error "" } */
+ }
+ __transaction_relaxed {
+ mco(); /* { dg-error "" } */
+ }
+ __transaction_atomic [[outer]] {
+ mco();
+ }
+}
+
+void __attribute__((transaction_may_cancel_outer))
+g(void)
+{
+ mco();
+ __transaction_atomic {
+ mco();
+ }
+ __transaction_atomic [[outer]] { /* { dg-error "" } */
+ mco();
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/safe-1.c b/gcc/testsuite/c-c++-common/tm/safe-1.c
new file mode 100644
index 00000000000..b2a43530575
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/safe-1.c
@@ -0,0 +1,69 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void ts(void) __attribute__((transaction_safe));
+void tp(void) __attribute__((transaction_pure));
+void tc(void) __attribute__((transaction_callable));
+void ti(void) __attribute__((transaction_unsafe));
+void tm(void) __attribute__((transaction_may_cancel_outer));
+void tu(void);
+int fc(int) __attribute__((const));
+
+typedef void (*Fs) (void) __attribute__((transaction_safe));
+typedef void (*Fc) (void) __attribute__((transaction_callable));
+typedef void (*Fi) (void) __attribute__((transaction_unsafe));
+typedef void (*Fm) (void) __attribute__((transaction_may_cancel_outer));
+extern Fs ps;
+extern Fc pc;
+extern Fi pi;
+extern Fm pm;
+extern void (*pu)(void);
+
+int __attribute__((transaction_safe))
+foo(void)
+{
+ int i;
+
+ ts();
+ tp();
+ tc(); /* { dg-error "unsafe function call" } */
+ ti(); /* { dg-error "unsafe function call" } */
+
+ /* ??? Direct function calls without markups are handled later
+ than pass_diagnose_tm_blocks, which means we'll exit with
+ errors before getting there. This test moved to safe-3.c. */
+ /* tu(); */
+
+ (*ps)();
+ (*pc)(); /* { dg-error "unsafe function call" } */
+ (*pi)(); /* { dg-error "unsafe function call" } */
+ (*pu)(); /* { dg-error "unsafe function call" } */
+
+ asm(""); /* { dg-error "asm not allowed" } */
+ asm("" : "=g"(i)); /* { dg-error "asm not allowed" } */
+
+ return fc(i);
+}
+
+int __attribute__((transaction_may_cancel_outer))
+bar(void)
+{
+ int i;
+
+ ts();
+ tp();
+ tc(); /* { dg-error "unsafe function call" } */
+ ti(); /* { dg-error "unsafe function call" } */
+ tm();
+
+ (*ps)();
+ (*pc)(); /* { dg-error "unsafe function call" } */
+ (*pi)(); /* { dg-error "unsafe function call" } */
+ (*pm)();
+ (*pu)(); /* { dg-error "unsafe function call" } */
+
+ asm(""); /* { dg-error "asm not allowed" } */
+ asm("" : "=g"(i)); /* { dg-error "asm not allowed" } */
+
+ return fc(i);
+}
diff --git a/gcc/testsuite/c-c++-common/tm/safe-2.c b/gcc/testsuite/c-c++-common/tm/safe-2.c
new file mode 100644
index 00000000000..a6729ba428b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/safe-2.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void mco(void) __attribute__((transaction_may_cancel_outer));
+
+void
+f(void)
+{
+ mco(); /* { dg-error "" } */
+ __transaction_atomic {
+ mco(); /* { dg-error "" } */
+ }
+ __transaction_relaxed {
+ mco(); /* { dg-error "" } */
+ }
+ __transaction_atomic [[outer]] {
+ mco();
+ }
+ __transaction_atomic [[outer]] {
+ __transaction_atomic {
+ __transaction_atomic {
+ __transaction_atomic {
+ mco();
+ }
+ }
+ }
+ }
+}
+
+void __attribute__((transaction_may_cancel_outer))
+g(void)
+{
+ mco();
+ __transaction_atomic {
+ __transaction_atomic {
+ __transaction_atomic {
+ __transaction_atomic {
+ mco();
+ }
+ }
+ }
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/tm/safe-3.c b/gcc/testsuite/c-c++-common/tm/safe-3.c
new file mode 100644
index 00000000000..8a883db5ef0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/safe-3.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void f_extern (void);
+void f_first (void);
+void f_later (void);
+
+extern int x;
+
+void f_first (void) { x++; }
+
+void __attribute__((transaction_safe))
+test_safe (void)
+{
+ f_extern (); /* { dg-error "unsafe function call" } */
+ f_first ();
+ f_later ();
+}
+
+void __attribute__((transaction_may_cancel_outer))
+test_mco (void)
+{
+ f_extern (); /* { dg-error "unsafe function call" } */
+ f_first ();
+ f_later ();
+}
+
+void
+test_atomic (void)
+{
+ __transaction_atomic {
+ f_extern (); /* { dg-error "unsafe function call" } */
+ f_first ();
+ f_later ();
+ }
+ __transaction_relaxed {
+ f_extern ();
+ f_first ();
+ f_later ();
+ }
+ __transaction_atomic [[outer]] {
+ f_extern (); /* { dg-error "unsafe function call" } */
+ f_first ();
+ f_later ();
+ }
+}
+
+void f_later () { f_first(); test_safe(); }
diff --git a/gcc/testsuite/c-c++-common/tm/trxn-expr-2.c b/gcc/testsuite/c-c++-common/tm/trxn-expr-2.c
new file mode 100644
index 00000000000..0ef6526db7b
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/trxn-expr-2.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* Make sure that we don't just crash without -fgnu-tm enabled. */
+/* { dg-options "" } */
+
+int x;
+
+int foo(void)
+{
+ return __transaction_atomic (x + 1); /* { dg-error "" } */
+}
+
+int bar(void)
+{
+ return __transaction_relaxed (x + 1); /* { dg-error "" } */
+}
diff --git a/gcc/testsuite/c-c++-common/tm/trxn-expr.c b/gcc/testsuite/c-c++-common/tm/trxn-expr.c
new file mode 100644
index 00000000000..53d4677e3e6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/trxn-expr.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark" } */
+
+int y, x, york;
+
+void foobar(void)
+{
+ x = y + __transaction_atomic (york);
+}
+
+/* { dg-final { scan-tree-dump-times "_ITM_RU.*york" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "_ITM_RU" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/c-c++-common/tm/wrap-1.c b/gcc/testsuite/c-c++-common/tm/wrap-1.c
new file mode 100644
index 00000000000..04b5b6f0dcb
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/tm/wrap-1.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-optimized" } */
+
+void orig(void);
+void xyzzy(void) __attribute__((transaction_wrap (orig)));
+
+void foo() { __transaction_relaxed { orig (); } }
+
+/* { dg-final { scan-tree-dump-times "xyzzy" 1 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/g++.dg/dg.exp b/gcc/testsuite/g++.dg/dg.exp
index cee19d68029..ad1f7e23700 100644
--- a/gcc/testsuite/g++.dg/dg.exp
+++ b/gcc/testsuite/g++.dg/dg.exp
@@ -47,6 +47,7 @@ set tests [prune $tests $srcdir/$subdir/gomp/*]
set tests [prune $tests $srcdir/$subdir/tree-prof/*]
set tests [prune $tests $srcdir/$subdir/torture/*]
set tests [prune $tests $srcdir/$subdir/graphite/*]
+set tests [prune $tests $srcdir/$subdir/tm/*]
set tests [prune $tests $srcdir/$subdir/guality/*]
set tests [prune $tests $srcdir/$subdir/simulate-thread/*]
diff --git a/gcc/testsuite/g++.dg/tm/20100429.C b/gcc/testsuite/g++.dg/tm/20100429.C
new file mode 100644
index 00000000000..087ce32b2a6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/20100429.C
@@ -0,0 +1,15 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+int foo(int a);
+int foo(float a);
+int
+bar(int a)
+{
+ int r;
+ __transaction_atomic
+ {
+ r = foo(a); // { dg-error "unsafe function call 'int foo\\(int\\)'" }
+ }
+ return r;
+}
diff --git a/gcc/testsuite/g++.dg/tm/20100727.C b/gcc/testsuite/g++.dg/tm/20100727.C
new file mode 100644
index 00000000000..bda2df0607c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/20100727.C
@@ -0,0 +1,796 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+typedef long int ptrdiff_t;
+typedef long unsigned int size_t;
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+ using::ptrdiff_t;
+ using::size_t;
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+ struct input_iterator_tag
+ {
+ };
+ struct output_iterator_tag
+ {
+ };
+ struct forward_iterator_tag:public input_iterator_tag
+ {
+ };
+ struct bidirectional_iterator_tag:public forward_iterator_tag
+ {
+ };
+ struct random_access_iterator_tag:public bidirectional_iterator_tag
+ {
+ };
+ template < typename _Category, typename _Tp, typename _Distance =
+ ptrdiff_t, typename _Pointer = _Tp *, typename _Reference =
+ _Tp & >struct iterator
+ {
+ typedef _Category iterator_category;
+ typedef _Tp value_type;
+ typedef _Distance difference_type;
+ typedef _Pointer pointer;
+ typedef _Reference reference;
+ };
+ template < typename _Iterator > struct iterator_traits
+ {
+ typedef typename _Iterator::iterator_category iterator_category;
+ typedef typename _Iterator::value_type value_type;
+ typedef typename _Iterator::difference_type difference_type;
+ typedef typename _Iterator::pointer pointer;
+ typedef typename _Iterator::reference reference;
+ };
+ template < typename _Tp > struct iterator_traits <_Tp * >
+ {
+ typedef random_access_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp *pointer;
+ typedef _Tp & reference;
+ };
+ template < typename _Tp > struct iterator_traits <const _Tp *>
+ {
+ typedef random_access_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef const _Tp *pointer;
+ typedef const _Tp & reference;
+ };
+ template < typename _Iter > inline typename iterator_traits <
+ _Iter >::iterator_category __iterator_category (const _Iter &)
+ {
+ return typename iterator_traits < _Iter >::iterator_category ();
+ }
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+template < typename _Iterator > class reverse_iterator:public iterator < typename iterator_traits < _Iterator >::iterator_category,
+ typename iterator_traits < _Iterator >::value_type,
+ typename iterator_traits < _Iterator >::difference_type,
+ typename iterator_traits < _Iterator >::pointer,
+ typename iterator_traits < _Iterator >::reference >
+ {
+ protected:_Iterator current;
+ typedef iterator_traits < _Iterator > __traits_type;
+ public:typedef _Iterator iterator_type;
+ typedef typename __traits_type::difference_type difference_type;
+ typedef typename __traits_type::pointer pointer;
+ typedef typename __traits_type::reference reference;
+ reverse_iterator ():current ()
+ {
+ } explicit reverse_iterator (iterator_type __x):current (__x)
+ {
+ } reverse_iterator (const reverse_iterator & __x):current (__x.current)
+ {
+ } template < typename _Iter > reverse_iterator (const reverse_iterator <
+ _Iter >
+ &__x):current (__x.
+ base ())
+ {
+ } iterator_type base () const
+ {
+ return current;
+ }
+ reference operator* () const
+ {
+ _Iterator __tmp = current;
+ return *--__tmp;
+ }
+ pointer operator-> () const
+ {
+ return &(operator* ());
+ }
+ reverse_iterator & operator++ ()
+ {
+ --current;
+ return *this;
+ }
+ reverse_iterator operator++ (int)
+ {
+ reverse_iterator __tmp = *this;
+ --current;
+ return __tmp;
+ }
+ reverse_iterator & operator-- ()
+ {
+ ++current;
+ return *this;
+ }
+ reverse_iterator operator-- (int)
+ {
+ reverse_iterator __tmp = *this;
+ ++current;
+ return __tmp;
+ }
+ reverse_iterator operator+ (difference_type __n) const
+ {
+ return reverse_iterator (current - __n);
+ }
+ reverse_iterator & operator+= (difference_type __n)
+ {
+ current -= __n;
+ return *this;
+ }
+ reverse_iterator operator- (difference_type __n) const
+ {
+ return reverse_iterator (current + __n);
+ }
+ reverse_iterator & operator-= (difference_type __n)
+ {
+ current += __n;
+ return *this;
+ }
+ reference operator[] (difference_type __n) const
+ {
+ return *(*this + __n);
+ }
+ };
+ template < typename _Iterator >
+ inline bool operator== (const reverse_iterator < _Iterator > &__x,
+ const reverse_iterator < _Iterator > &__y)
+ {
+ return __x.base () == __y.base ();
+ }
+ template < typename _Iterator >
+ inline bool operator< (const reverse_iterator < _Iterator > &__x,
+ const reverse_iterator < _Iterator > &__y)
+ {
+ return __y.base () < __x.base ();
+ }
+ template < typename _Iterator >
+ inline bool operator!= (const reverse_iterator < _Iterator > &__x,
+ const reverse_iterator < _Iterator > &__y)
+ {
+ return !(__x == __y);
+ }
+ template < typename _Iterator >
+ inline bool operator> (const reverse_iterator < _Iterator > &__x,
+ const reverse_iterator < _Iterator > &__y)
+ {
+ return __y < __x;
+ }
+ template < typename _Iterator >
+ inline bool operator<= (const reverse_iterator < _Iterator > &__x,
+ const reverse_iterator < _Iterator > &__y)
+ {
+ return !(__y < __x);
+ }
+ template < typename _Iterator >
+ inline bool operator>= (const reverse_iterator < _Iterator > &__x,
+ const reverse_iterator < _Iterator > &__y)
+ {
+ return !(__x < __y);
+ }
+ template < typename _Iterator > inline typename reverse_iterator <
+ _Iterator >::difference_type operator- (const reverse_iterator <
+ _Iterator > &__x,
+ const reverse_iterator <
+ _Iterator > &__y)
+ {
+ return __y.base () - __x.base ();
+ }
+ template < typename _Iterator > inline reverse_iterator < _Iterator >
+ operator+ (typename reverse_iterator < _Iterator >::difference_type __n,
+ const reverse_iterator < _Iterator > &__x)
+ {
+ return reverse_iterator < _Iterator > (__x.base () - __n);
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline bool operator== (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return __x.base () == __y.base ();
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline bool operator< (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return __y.base () < __x.base ();
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline bool operator!= (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return !(__x == __y);
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline bool operator> (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return __y < __x;
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline bool operator<= (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return !(__y < __x);
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline bool operator>= (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return !(__x < __y);
+ }
+ template < typename _IteratorL,
+ typename _IteratorR > inline typename reverse_iterator <
+ _IteratorL >::difference_type operator- (const reverse_iterator <
+ _IteratorL > &__x,
+ const reverse_iterator <
+ _IteratorR > &__y)
+ {
+ return __y.base () - __x.base ();
+ }
+template < typename _Container > class back_insert_iterator:public iterator < output_iterator_tag, void, void, void,
+ void >
+ {
+ protected:_Container * container;
+ public:typedef _Container container_type;
+ explicit back_insert_iterator (_Container & __x):container (&__x)
+ {
+ } back_insert_iterator & operator= (typename _Container::
+ const_reference __value)
+ {
+ container->push_back (__value);
+ return *this;
+ }
+ back_insert_iterator & operator* ()
+ {
+ return *this;
+ }
+ back_insert_iterator & operator++ ()
+ {
+ return *this;
+ }
+ back_insert_iterator operator++ (int)
+ {
+ return *this;
+ }
+ };
+ template < typename _Container > inline back_insert_iterator < _Container >
+ back_inserter (_Container & __x)
+ {
+ return back_insert_iterator < _Container > (__x);
+ }
+template < typename _Container > class front_insert_iterator:public iterator < output_iterator_tag, void, void, void,
+ void >
+ {
+ protected:_Container * container;
+ public:typedef _Container container_type;
+ explicit front_insert_iterator (_Container & __x):container (&__x)
+ {
+ } front_insert_iterator & operator= (typename _Container::
+ const_reference __value)
+ {
+ container->push_front (__value);
+ return *this;
+ }
+ front_insert_iterator & operator* ()
+ {
+ return *this;
+ }
+ front_insert_iterator & operator++ ()
+ {
+ return *this;
+ }
+ front_insert_iterator operator++ (int)
+ {
+ return *this;
+ }
+ };
+ template < typename _Container > inline front_insert_iterator < _Container >
+ front_inserter (_Container & __x)
+ {
+ return front_insert_iterator < _Container > (__x);
+ }
+template < typename _Container > class insert_iterator:public iterator < output_iterator_tag, void, void, void,
+ void >
+ {
+ protected:_Container * container;
+ typename _Container::iterator iter;
+ public:typedef _Container container_type;
+ insert_iterator (_Container & __x,
+ typename _Container::iterator __i):container (&__x),
+ iter (__i)
+ {
+ } insert_iterator & operator= (typename _Container::
+ const_reference __value)
+ {
+ iter = container->insert (iter, __value);
+ ++iter;
+ return *this;
+ }
+ insert_iterator & operator* ()
+ {
+ return *this;
+ }
+ insert_iterator & operator++ ()
+ {
+ return *this;
+ }
+ insert_iterator & operator++ (int)
+ {
+ return *this;
+ }
+ };
+ template < typename _Container,
+ typename _Iterator > inline insert_iterator < _Container >
+ inserter (_Container & __x, _Iterator __i)
+ {
+ return insert_iterator < _Container > (__x,
+ typename _Container::
+ iterator (__i));
+ }
+}
+
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default")))
+{
+ using std::size_t;
+ using std::ptrdiff_t;
+ template < typename _Tp > class new_allocator
+ {
+ public:typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp *pointer;
+ typedef const _Tp *const_pointer;
+ typedef _Tp & reference;
+ typedef const _Tp & const_reference;
+ typedef _Tp value_type;
+ template < typename _Tp1 > struct rebind
+ {
+ typedef new_allocator < _Tp1 > other;
+ };
+ new_allocator ()throw ()
+ {
+ } new_allocator (const new_allocator &) throw ()
+ {
+ } template < typename _Tp1 > new_allocator (const new_allocator < _Tp1 >
+ &) throw ()
+ {
+ } ~new_allocator ()throw ()
+ {
+ } pointer address (reference __x) const
+ {
+ return &__x;
+ }
+ const_pointer address (const_reference __x) const
+ {
+ return &__x;
+ }
+ pointer allocate (size_type __n, const void * = 0)
+ {
+ return static_cast < _Tp * >(::operator new (__n * sizeof (_Tp)));
+ }
+ void deallocate (pointer __p, size_type)
+ {
+ ::operator delete (__p);
+ } size_type max_size () const throw ()
+ {
+ return size_t (-1) / sizeof (_Tp);
+ }
+ void construct (pointer __p, const _Tp & __val)
+ {
+ ::new ((void *) __p) _Tp (__val);
+ } void destroy (pointer __p)
+ {
+ __p->~_Tp ();
+ }};
+ template < typename _Tp > inline bool operator== (const new_allocator <
+ _Tp > &,
+ const new_allocator <
+ _Tp > &)
+ {
+ return true;
+ }
+ template < typename _Tp > inline bool operator!= (const new_allocator <
+ _Tp > &,
+ const new_allocator <
+ _Tp > &)
+ {
+ return false;
+ }
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+ template < typename _Tp > class allocator;
+ template <> class allocator < void >
+ {
+ public:typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void *pointer;
+ typedef const void *const_pointer;
+ typedef void value_type;
+ template < typename _Tp1 > struct rebind
+ {
+ typedef allocator < _Tp1 > other;
+ };
+ };
+template < typename _Tp > class allocator:public __gnu_cxx::new_allocator <
+ _Tp >
+ {
+ public:typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp *pointer;
+ typedef const _Tp *const_pointer;
+ typedef _Tp & reference;
+ typedef const _Tp & const_reference;
+ typedef _Tp value_type;
+ template < typename _Tp1 > struct rebind
+ {
+ typedef allocator < _Tp1 > other;
+ };
+ allocator ()throw ()
+ {
+ } allocator (const allocator & __a) throw ():__gnu_cxx::new_allocator <
+ _Tp > (__a)
+ {
+ } template < typename _Tp1 > allocator (const allocator < _Tp1 >
+ &) throw ()
+ {
+ } ~allocator ()throw ()
+ {
+ }};
+ template < typename _T1,
+ typename _T2 > inline bool operator== (const allocator < _T1 > &,
+ const allocator < _T2 > &)
+ {
+ return true;
+ }
+ template < typename _Tp > inline bool operator== (const allocator < _Tp > &,
+ const allocator < _Tp > &)
+ {
+ return true;
+ }
+ template < typename _T1,
+ typename _T2 > inline bool operator!= (const allocator < _T1 > &,
+ const allocator < _T2 > &)
+ {
+ return false;
+ }
+ template < typename _Tp > inline bool operator!= (const allocator < _Tp > &,
+ const allocator < _Tp > &)
+ {
+ return false;
+ }
+ template < typename _Alloc, bool = __is_empty (_Alloc) > struct __alloc_swap
+ {
+ static void _S_do_it (_Alloc &, _Alloc &)
+ {
+ }};
+ template < typename _Alloc > struct __alloc_swap <_Alloc, false >
+ {
+ static void _S_do_it (_Alloc & __one, _Alloc & __two)
+ {
+ if (__one != __two)
+ swap (__one, __two);
+ }
+ };
+ template < typename _Alloc, bool = __is_empty (_Alloc) > struct __alloc_neq
+ {
+ static bool _S_do_it (const _Alloc &, const _Alloc &)
+ {
+ return false;
+ }
+ };
+ template < typename _Alloc > struct __alloc_neq <_Alloc, false >
+ {
+ static bool _S_do_it (const _Alloc & __one, const _Alloc & __two)
+ {
+ return __one != __two;
+ }
+ };
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+ struct _List_node_base
+ {
+ _List_node_base *_M_next;
+ _List_node_base *_M_prev;
+ static void swap (_List_node_base & __x, _List_node_base & __y) throw ();
+ void _M_transfer (_List_node_base * const __first,
+ _List_node_base * const __last) throw ();
+ void _M_reverse () throw ();
+ void _M_hook (_List_node_base * const __position) throw ();
+ void _M_unhook () throw ();
+ };
+ template < typename _Tp > struct _List_node:public _List_node_base
+ {
+ _Tp _M_data;
+ };
+ template < typename _Tp > struct _List_iterator
+ {
+ typedef _List_iterator < _Tp > _Self;
+ typedef _List_node < _Tp > _Node;
+ typedef ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef _Tp *pointer;
+ typedef _Tp & reference;
+ _List_iterator ():_M_node ()
+ {
+ } explicit _List_iterator (_List_node_base * __x):_M_node (__x)
+ {
+ } reference operator* () const
+ {
+ return static_cast < _Node * >(_M_node)->_M_data;
+ }
+ pointer operator-> () const
+ {
+ return &static_cast < _Node * >(_M_node)->_M_data;
+ }
+ _Self & operator++ ()
+ {
+ _M_node = _M_node->_M_next;
+ return *this;
+ }
+ _Self operator++ (int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_next;
+ return __tmp;
+ }
+ _Self & operator-- ()
+ {
+ _M_node = _M_node->_M_prev;
+ return *this;
+ }
+ _Self operator-- (int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_prev;
+ return __tmp;
+ }
+ bool operator== (const _Self & __x) const
+ {
+ return _M_node == __x._M_node;
+ }
+ bool operator!= (const _Self & __x) const
+ {
+ return _M_node != __x._M_node;
+ }
+ _List_node_base *_M_node;
+ };
+ template < typename _Tp > struct _List_const_iterator
+ {
+ typedef _List_const_iterator < _Tp > _Self;
+ typedef const _List_node < _Tp > _Node;
+ typedef _List_iterator < _Tp > iterator;
+ typedef ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef const _Tp *pointer;
+ typedef const _Tp & reference;
+ _List_const_iterator ():_M_node ()
+ {
+ } explicit _List_const_iterator (const _List_node_base *
+ __x):_M_node (__x)
+ {
+ } _List_const_iterator (const iterator & __x):_M_node (__x._M_node)
+ {
+ } reference operator* () const
+ {
+ return static_cast < _Node * >(_M_node)->_M_data;
+ }
+ pointer operator-> () const
+ {
+ return &static_cast < _Node * >(_M_node)->_M_data;
+ }
+ _Self & operator++ ()
+ {
+ _M_node = _M_node->_M_next;
+ return *this;
+ }
+ _Self operator++ (int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_next;
+ return __tmp;
+ }
+ _Self & operator-- ()
+ {
+ _M_node = _M_node->_M_prev;
+ return *this;
+ }
+ _Self operator-- (int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_prev;
+ return __tmp;
+ }
+ bool operator== (const _Self & __x) const
+ {
+ return _M_node == __x._M_node;
+ }
+ bool operator!= (const _Self & __x) const
+ {
+ return _M_node != __x._M_node;
+ }
+ const _List_node_base *_M_node;
+ };
+ template < typename _Tp, typename _Alloc > class _List_base
+ {
+ protected:typedef typename _Alloc::template rebind < _List_node < _Tp >
+ >::other _Node_alloc_type;
+ typedef typename _Alloc::template rebind < _Tp >::other _Tp_alloc_type;
+ struct _List_impl:public _Node_alloc_type
+ {
+ _List_node_base _M_node;
+ _List_impl ():_Node_alloc_type (), _M_node ()
+ {
+ } _List_impl (const _Node_alloc_type & __a):_Node_alloc_type (__a),
+ _M_node ()
+ {
+ }};
+ _List_impl _M_impl;
+ _List_node < _Tp > *_M_get_node ()
+ {
+ return _M_impl._Node_alloc_type::allocate (1);
+ }
+ void _M_put_node (_List_node < _Tp > *__p)
+ {
+ _M_impl._Node_alloc_type::deallocate (__p, 1);
+ } public:typedef _Alloc allocator_type;
+ _Node_alloc_type & _M_get_Node_allocator ()
+ {
+ return *static_cast < _Node_alloc_type * >(&this->_M_impl);
+ }
+ const _Node_alloc_type & _M_get_Node_allocator () const
+ {
+ return *static_cast < const _Node_alloc_type *>(&this->_M_impl);
+ } _Tp_alloc_type _M_get_Tp_allocator () const
+ {
+ return _Tp_alloc_type (_M_get_Node_allocator ());
+ }
+ allocator_type get_allocator () const
+ {
+ return allocator_type (_M_get_Node_allocator ());
+ }
+ _List_base ():_M_impl ()
+ {
+ _M_init ();
+ }
+ _List_base (const allocator_type & __a):_M_impl (__a)
+ {
+ _M_init ();
+ } ~_List_base ()
+ {
+ _M_clear ();
+ } void _M_clear ();
+ void _M_init ()
+ {
+ this->_M_impl._M_node._M_next = &this->_M_impl._M_node;
+ this->_M_impl._M_node._M_prev = &this->_M_impl._M_node;
+ }};
+template < typename _Tp, typename _Alloc = std::allocator < _Tp > >class list:protected _List_base < _Tp,
+ _Alloc
+ >
+ {
+ typedef typename _Alloc::value_type _Alloc_value_type;
+ typedef _List_base < _Tp, _Alloc > _Base;
+ typedef typename _Base::_Tp_alloc_type _Tp_alloc_type;
+ public:typedef _Tp value_type;
+ typedef typename _Tp_alloc_type::pointer pointer;
+ typedef typename _Tp_alloc_type::const_pointer const_pointer;
+ typedef typename _Tp_alloc_type::reference reference;
+ typedef typename _Tp_alloc_type::const_reference const_reference;
+ typedef _List_iterator < _Tp > iterator;
+ typedef _List_const_iterator < _Tp > const_iterator;
+ typedef std::reverse_iterator < const_iterator > const_reverse_iterator;
+ typedef std::reverse_iterator < iterator > reverse_iterator;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Alloc allocator_type;
+ protected:typedef _List_node < _Tp > _Node;
+ using _Base::_M_impl;
+ using _Base::_M_put_node;
+ using _Base::_M_get_node;
+ using _Base::_M_get_Tp_allocator;
+ using _Base::_M_get_Node_allocator;
+ public:iterator begin ()
+ {
+ return iterator (this->_M_impl._M_node._M_next);
+ }
+ const_iterator begin () const
+ {
+ return const_iterator (this->_M_impl._M_node._M_next);
+ }
+ iterator end ()
+ {
+ return iterator (&this->_M_impl._M_node);
+ }
+ void remove (const _Tp & __value);
+ template < typename _Predicate > void remove_if (_Predicate);
+ void _M_erase (iterator __position)
+ {
+ __position._M_node->_M_unhook ();
+ _Node *__n = static_cast < _Node * >(__position._M_node);
+ _M_get_Tp_allocator ().destroy (&__n->_M_data);
+ _M_put_node (__n);
+ } void _M_check_equal_allocators (list & __x)
+ {
+ if (std::__alloc_neq <
+ typename _Base::_Node_alloc_type >::
+ _S_do_it (_M_get_Node_allocator (), __x._M_get_Node_allocator ()));
+ }
+ };
+}
+
+namespace std __attribute__ ((__visibility__ ("default")))
+{
+ template < typename _Tp, typename _Alloc > void list < _Tp,
+ _Alloc >::remove (const value_type & __value)
+ {
+ iterator __first = begin ();
+ iterator __last = end ();
+ iterator __extra = __last;
+ while (__first != __last)
+ {
+ iterator __next = __first;
+ ++__next;
+ if (*__first == __value)
+ {
+ if (&*__first != &__value)
+ _M_erase (__first);
+ else
+ __extra = __first;
+ }
+ __first = __next;
+ }
+ if (__extra != __last)
+ _M_erase (__extra);
+ }
+}
+
+class Unit
+{
+public:int dummy;
+};
+class Building
+{
+public:__attribute__ ((transaction_callable)) void removeUnitFromInside (Unit *
+ unit);
+ std::list < Unit * >unitsInside;
+};
+void
+Building::removeUnitFromInside (Unit * unit)
+{
+ unitsInside.remove (unit);
+}
diff --git a/gcc/testsuite/g++.dg/tm/alias.C b/gcc/testsuite/g++.dg/tm/alias.C
new file mode 100644
index 00000000000..4459c703bdc
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/alias.C
@@ -0,0 +1,20 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O0" }
+
+/* Test that we generate transactional clones for both the base and
+ the complete dtor for class Itemset. */
+
+class Itemset {
+public:
+ __attribute__((transaction_safe)) ~Itemset();
+ __attribute__((transaction_safe)) void operator delete(void *);
+private:
+};
+
+__attribute__((transaction_safe))
+Itemset::~Itemset()
+{
+}
+
+// { dg-final { scan-assembler "_ZGTtN7ItemsetD1Ev" } }
+// { dg-final { scan-assembler "_ZGTtN7ItemsetD2Ev" } }
diff --git a/gcc/testsuite/g++.dg/tm/attrib-2.C b/gcc/testsuite/g++.dg/tm/attrib-2.C
new file mode 100644
index 00000000000..6a418e559cf
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/attrib-2.C
@@ -0,0 +1,22 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -fdump-tree-optimized-asmname" }
+
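+// Both the original member functions and their transactional clones
+// (mangled with the _ZGTt prefix) should show up in the optimized dump.
+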
+struct __attribute__((transaction_safe)) Tsafe
+{
+ void f();
+};
+
+void Tsafe::f() { }
+
+struct __attribute__((transaction_callable)) Tcall
+{
+ void f();
+};
+
+void Tcall::f() { }
+
+// { dg-final { scan-tree-dump-times "_ZN5Tsafe1fEv" 1 "optimized" } }
+// { dg-final { scan-tree-dump-times "_ZN5Tcall1fEv" 1 "optimized" } }
+// { dg-final { scan-tree-dump-times "_ZGTtN5Tsafe1fEv" 1 "optimized" } }
+// { dg-final { scan-tree-dump-times "_ZGTtN5Tcall1fEv" 1 "optimized" } }
+// { dg-final { cleanup-tree-dump "optimized" } }
diff --git a/gcc/testsuite/g++.dg/tm/attrib-3.C b/gcc/testsuite/g++.dg/tm/attrib-3.C
new file mode 100644
index 00000000000..a2c9718bc09
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/attrib-3.C
@@ -0,0 +1,33 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -fdump-tree-optimized-asmname" }
+
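+// The transaction_safe attribute on the base class A should carry over to
+// the derived classes B and E, so each f() gets a transactional clone
+// (_ZGTt...) in addition to the regular symbol.
+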
+struct __attribute__((transaction_safe)) A
+{
+};
+
+struct B : public A
+{
+ void f();
+};
+
+struct C
+{
+};
+
+struct D : public C
+{
+};
+
+struct E : public D, public A
+{
+ void f();
+};
+
+void B::f() { }
+void E::f() { }
+
+// { dg-final { scan-tree-dump-times "_ZN1B1fEv" 1 "optimized" } }
+// { dg-final { scan-tree-dump-times "_ZGTtN1B1fEv" 1 "optimized" } }
+// { dg-final { scan-tree-dump-times "_ZN1E1fEv" 1 "optimized" } }
+// { dg-final { scan-tree-dump-times "_ZGTtN1E1fEv" 1 "optimized" } }
+// { dg-final { cleanup-tree-dump "optimized" } }
diff --git a/gcc/testsuite/g++.dg/tm/attrib-4.C b/gcc/testsuite/g++.dg/tm/attrib-4.C
new file mode 100644
index 00000000000..b589b9d41a2
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/attrib-4.C
@@ -0,0 +1,48 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+#define __ts __attribute__((transaction_safe))
+#define __tc __attribute__((transaction_callable))
+#define __tp __attribute__((transaction_pure))
+#define __tu __attribute__((transaction_unsafe))
+
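+// An overrider may not weaken the transactional attribute of the virtual it
+// overrides: transaction_callable may not override transaction_safe,
+// transaction_unsafe may not override transaction_callable, and
+// transaction_safe may not override transaction_pure.
+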
+struct __ts A
+{
+ virtual void f();
+ virtual void g();
+};
+
+struct __tc B : public A
+{
+ void f() __tc; // { dg-error ".transaction_callable. overriding .transaction_safe." }
+ void g();
+ virtual void h();
+};
+
+struct C : public B
+{
+ void g() __tc; // { dg-error ".transaction_callable. overriding .transaction_safe." }
+};
+
+struct C2 : public B
+{
+ void g() __ts;
+ void h() __tu; // { dg-error ".transaction_unsafe. overriding .transaction_callable." }
+};
+
+struct D
+{
+ virtual void f() __tp;
+ virtual void g() __tp;
+};
+
+struct E : public D
+{
+ void f() __ts; // { dg-error ".transaction_safe. overriding .transaction_pure." }
+ void g();
+};
+
+struct F : public E
+{
+ void g() __ts; // { dg-error ".transaction_safe. overriding .transaction_pure." }
+};
diff --git a/gcc/testsuite/g++.dg/tm/fatomic-1.C b/gcc/testsuite/g++.dg/tm/fatomic-1.C
new file mode 100644
index 00000000000..dac850aac07
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/fatomic-1.C
@@ -0,0 +1,10 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
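+// The __transaction_atomic specifier on a constructor definition, written
+// before the mem-initializer list, should be accepted.
+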
+struct S
+{
+ int i, j, k;
+ S();
+};
+
+S::S() __transaction_atomic : i(1), j(2), k(3) { }
diff --git a/gcc/testsuite/g++.dg/tm/nested-1.C b/gcc/testsuite/g++.dg/tm/nested-1.C
new file mode 100644
index 00000000000..1f4e19e9891
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/nested-1.C
@@ -0,0 +1,22 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
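+// An atomic transaction nested inside a transaction_safe member function
+// should compile cleanly.
+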
+class HashTree
+{
+public:
+ __attribute__((transaction_safe))
+ int add_element2();
+private:
+ int Count;
+};
+
+
+__attribute__((transaction_safe))
+int HashTree::add_element2()
+{
+ int tt;
+ __transaction_atomic {
+ tt = Count;
+ }
+ return tt;
+}
diff --git a/gcc/testsuite/g++.dg/tm/nested-2.C b/gcc/testsuite/g++.dg/tm/nested-2.C
new file mode 100644
index 00000000000..c6d5d98ba7a
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/nested-2.C
@@ -0,0 +1,41 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O" }
+
+typedef unsigned long int uint64_t;
+extern int *hash_indx;
+
+typedef struct
+{
+ uint64_t exit_atomicsec_time;
+} ent_ex_times;
+class HashTree
+{
+public:
+ __attribute__((transaction_safe))
+ void *operator new(__SIZE_TYPE__);
+ __attribute__((transaction_safe))
+ int add_element();
+private:
+ HashTree **Hash_table;
+ int Count;
+};
+
+
+__attribute__((transaction_safe))
+int HashTree::add_element()
+{
+ ent_ex_times enter_exit_times_inside;
+ int val = hash_indx[5];
+ int tt;
+ if (Hash_table[val] == __null)
+ {
+ __transaction_atomic {
+ Hash_table[val] = new HashTree;
+ }
+ }
+ __transaction_atomic {
+ tt = Count++;
+ enter_exit_times_inside.exit_atomicsec_time = 5;
+ }
+ return tt;
+}
diff --git a/gcc/testsuite/g++.dg/tm/nested-3.C b/gcc/testsuite/g++.dg/tm/nested-3.C
new file mode 100644
index 00000000000..19718c34a02
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/nested-3.C
@@ -0,0 +1,43 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O0" }
+
+// Same as nested-2.C but with no optimization.
+
+typedef unsigned long int uint64_t;
+extern int *hash_indx;
+
+typedef struct
+{
+ uint64_t exit_atomicsec_time;
+} ent_ex_times;
+class HashTree
+{
+public:
+ __attribute__((transaction_safe))
+ void *operator new(__SIZE_TYPE__);
+ __attribute__((transaction_safe))
+ int add_element();
+private:
+ HashTree **Hash_table;
+ int Count;
+};
+
+
+__attribute__((transaction_safe))
+int HashTree::add_element()
+{
+ ent_ex_times enter_exit_times_inside;
+ int val = hash_indx[5];
+ int tt;
+ if (Hash_table[val] == __null)
+ {
+ __transaction_atomic {
+ Hash_table[val] = new HashTree;
+ }
+ }
+ __transaction_atomic {
+ tt = Count++;
+ enter_exit_times_inside.exit_atomicsec_time = 5;
+ }
+ return tt;
+}
diff --git a/gcc/testsuite/g++.dg/tm/opt-1.C b/gcc/testsuite/g++.dg/tm/opt-1.C
new file mode 100644
index 00000000000..8dd41a5bfe8
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/opt-1.C
@@ -0,0 +1,9 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O" }
+
+struct S
+{
+ virtual void f() __attribute__((transaction_safe));
+};
+
+void f(S *s) { __transaction_atomic { s->f(); } }
diff --git a/gcc/testsuite/g++.dg/tm/pr45940-2.C b/gcc/testsuite/g++.dg/tm/pr45940-2.C
new file mode 100644
index 00000000000..ab10c34baaa
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr45940-2.C
@@ -0,0 +1,30 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O1" }
+
+__attribute__((transaction_pure))
+inline int atomic_exchange_and_add(int dv )
+{
+ int r;
+ __asm__ ("" : "=r"(r));
+ return r;
+}
+
+class sp_counted_base
+{
+public:
+ __attribute__((transaction_safe))
+ void release()
+ {
+ if( atomic_exchange_and_add(-1 ) == 1 )
+ {
+ }
+ }
+};
+
+sp_counted_base *base;
+
+void here(){
+ __transaction_atomic {
+ base->release();
+ }
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr45940-3.C b/gcc/testsuite/g++.dg/tm/pr45940-3.C
new file mode 100644
index 00000000000..c8caee6c8b9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr45940-3.C
@@ -0,0 +1,69 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O0" }
+
+__attribute__((transaction_safe))
+void* operator new (__SIZE_TYPE__);
+
+__attribute__((transaction_pure))
+inline int atomic_exchange_and_add( int * pw, int dv )
+{
+ int r;
+ __asm__ ("" : "=r"(r));
+ return r;
+}
+
+class sp_counted_base
+{
+protected:
+ int use_count_; // #shared
+public:
+ __attribute__((transaction_safe))
+ virtual void dispose() = 0; // nothrow
+
+ __attribute__((transaction_safe))
+ void release() // nothrow
+ {
+ if( atomic_exchange_and_add( &use_count_, -1 ) == 1 )
+ {
+ dispose();
+ }
+ }
+};
+
+class sp_counted_base_x86 : public sp_counted_base
+{
+public:
+ void dispose()
+ {
+ release();
+ }
+};
+
+class shared_count
+{
+private:
+ sp_counted_base * pi_;
+public:
+ int j;
+ __attribute__((transaction_safe))
+ shared_count(): pi_(new sp_counted_base_x86()), j(0)
+ {
+ }
+ __attribute__((transaction_safe))
+ ~shared_count() // nothrow
+ {
+ if( pi_ != 0 ) pi_->release();
+ }
+};
+
+volatile int i = 1;
+shared_count * c;
+int main()
+{
+ if ( i == 0) {
+ __transaction_atomic {
+ shared_count sc;
+ }
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr45940-4.C b/gcc/testsuite/g++.dg/tm/pr45940-4.C
new file mode 100644
index 00000000000..d1cb8d55ae6
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr45940-4.C
@@ -0,0 +1,69 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O1" }
+
+__attribute__((transaction_safe))
+void* operator new (__SIZE_TYPE__);
+
+__attribute__((transaction_pure))
+inline int atomic_exchange_and_add( int * pw, int dv )
+{
+ int r;
+ __asm__ ("" : "=r"(r));
+ return r;
+}
+
+class sp_counted_base
+{
+protected:
+ int use_count_; // #shared
+public:
+ __attribute__((transaction_safe))
+ virtual void dispose() = 0; // nothrow
+
+ __attribute__((transaction_safe))
+ void release() // nothrow
+ {
+ if( atomic_exchange_and_add( &use_count_, -1 ) == 1 )
+ {
+ dispose();
+ }
+ }
+};
+
+class sp_counted_base_x86 : public sp_counted_base
+{
+public:
+ void dispose()
+ {
+ release();
+ }
+};
+
+class shared_count
+{
+private:
+ sp_counted_base * pi_;
+public:
+ int j;
+ __attribute__((transaction_safe))
+ shared_count(): pi_(new sp_counted_base_x86()), j(0)
+ {
+ }
+ __attribute__((transaction_safe))
+ ~shared_count() // nothrow
+ {
+ if( pi_ != 0 ) pi_->release();
+ }
+};
+
+volatile int i = 1;
+shared_count * c;
+int main()
+{
+ if ( i == 0) {
+ __transaction_atomic {
+ shared_count sc;
+ }
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr45940.C b/gcc/testsuite/g++.dg/tm/pr45940.C
new file mode 100644
index 00000000000..3e9a2c3f8da
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr45940.C
@@ -0,0 +1,30 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O0" }
+
+__attribute__((transaction_pure))
+inline int atomic_exchange_and_add(int dv )
+{
+ int r;
+ __asm__ ("" : "=r"(r));
+ return r;
+}
+
+class sp_counted_base
+{
+public:
+ __attribute__((transaction_safe))
+ void release()
+ {
+ if( atomic_exchange_and_add(-1 ) == 1 )
+ {
+ }
+ }
+};
+
+sp_counted_base *base;
+
+void here(){
+ __transaction_atomic {
+ base->release();
+ }
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr46269.C b/gcc/testsuite/g++.dg/tm/pr46269.C
new file mode 100644
index 00000000000..b4b13af5bf1
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46269.C
@@ -0,0 +1,29 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+static inline void atomic_exchange_and_add()
+{
+ __asm__ ("");
+}
+
+template<class T> class shared_ptr
+{
+public:
+ shared_ptr( T * p )
+ {
+ atomic_exchange_and_add();
+ }
+};
+
+class BuildingCompletedEvent
+{
+ public:
+ __attribute__((transaction_callable)) void updateBuildingSite(void);
+ __attribute__((transaction_pure)) BuildingCompletedEvent();
+};
+
+void BuildingCompletedEvent::updateBuildingSite(void)
+{
+ shared_ptr<BuildingCompletedEvent> event(new BuildingCompletedEvent());
+}
+
diff --git a/gcc/testsuite/g++.dg/tm/pr46270.C b/gcc/testsuite/g++.dg/tm/pr46270.C
new file mode 100644
index 00000000000..291e620a387
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46270.C
@@ -0,0 +1,27 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+#include <list>
+class Game
+{
+public:
+ struct BuildProject
+ {
+ int posX;
+ };
+ std::list<BuildProject> buildProjects;
+};
+
+static Game game;
+static std::list<std::list<Game::BuildProject>::iterator> erasableBuildProjects;
+
+static void *buildProjectSyncStepConcurrently(int id, int localTeam)
+{
+ __transaction_relaxed {
+ std::list<std::list<Game::BuildProject>::iterator>::iterator it
+ = erasableBuildProjects.begin();
+ game.buildProjects.erase( (std::list<Game::BuildProject>
+ ::iterator) *it);
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr46300.C b/gcc/testsuite/g++.dg/tm/pr46300.C
new file mode 100644
index 00000000000..7b3c613c3f5
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46300.C
@@ -0,0 +1,8 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
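+// A throw expression inside an atomic transaction; this should compile.
+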
+void foo(){
+ __transaction_atomic {
+ throw 5;
+ }
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr46567.C b/gcc/testsuite/g++.dg/tm/pr46567.C
new file mode 100644
index 00000000000..2f0ef93f2d4
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46567.C
@@ -0,0 +1,2676 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O2" }
+
+typedef __PTRDIFF_TYPE__ ptrdiff_t;
+typedef __SIZE_TYPE__ size_t;
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ using ::ptrdiff_t;
+ using ::size_t;
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ void
+ __throw_bad_exception(void) __attribute__((__noreturn__));
+ void
+ __throw_bad_alloc(void) __attribute__((__noreturn__));
+ void
+ __throw_bad_cast(void) __attribute__((__noreturn__));
+ void
+ __throw_bad_typeid(void) __attribute__((__noreturn__));
+ void
+ __throw_logic_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_domain_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_invalid_argument(const char*) __attribute__((__noreturn__));
+ void
+ __throw_length_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_out_of_range(const char*) __attribute__((__noreturn__));
+ void
+ __throw_runtime_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_range_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_overflow_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_underflow_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_ios_failure(const char*) __attribute__((__noreturn__));
+ void
+ __throw_system_error(int) __attribute__((__noreturn__));
+}
+
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Iterator, typename _Container>
+ class __normal_iterator;
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ struct __true_type { };
+ struct __false_type { };
+ template<bool>
+ struct __truth_type
+ { typedef __false_type __type; };
+ template<>
+ struct __truth_type<true>
+ { typedef __true_type __type; };
+ template<class _Sp, class _Tp>
+ struct __traitor
+ {
+ enum { __value = bool(_Sp::__value) || bool(_Tp::__value) };
+ typedef typename __truth_type<__value>::__type __type;
+ };
+ template<typename, typename>
+ struct __are_same
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<typename _Tp>
+ struct __are_same<_Tp, _Tp>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_void
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<>
+ struct __is_void<void>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_integer
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<>
+ struct __is_integer<bool>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<signed char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<unsigned char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<wchar_t>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<short>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<unsigned short>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<int>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<unsigned int>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<long>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<unsigned long>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<long long>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_integer<unsigned long long>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_floating
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<>
+ struct __is_floating<float>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_floating<double>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_floating<long double>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_pointer
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<typename _Tp>
+ struct __is_pointer<_Tp*>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_normal_iterator
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<typename _Iterator, typename _Container>
+ struct __is_normal_iterator< __gnu_cxx::__normal_iterator<_Iterator,
+ _Container> >
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_arithmetic
+ : public __traitor<__is_integer<_Tp>, __is_floating<_Tp> >
+ { };
+ template<typename _Tp>
+ struct __is_fundamental
+ : public __traitor<__is_void<_Tp>, __is_arithmetic<_Tp> >
+ { };
+ template<typename _Tp>
+ struct __is_scalar
+ : public __traitor<__is_arithmetic<_Tp>, __is_pointer<_Tp> >
+ { };
+ template<typename _Tp>
+ struct __is_char
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<>
+ struct __is_char<char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_char<wchar_t>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_byte
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+ template<>
+ struct __is_byte<char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_byte<signed char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<>
+ struct __is_byte<unsigned char>
+ {
+ enum { __value = 1 };
+ typedef __true_type __type;
+ };
+ template<typename _Tp>
+ struct __is_move_iterator
+ {
+ enum { __value = 0 };
+ typedef __false_type __type;
+ };
+}
+
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ template<bool, typename>
+ struct __enable_if
+ { };
+ template<typename _Tp>
+ struct __enable_if<true, _Tp>
+ { typedef _Tp __type; };
+ template<bool _Cond, typename _Iftrue, typename _Iffalse>
+ struct __conditional_type
+ { typedef _Iftrue __type; };
+ template<typename _Iftrue, typename _Iffalse>
+ struct __conditional_type<false, _Iftrue, _Iffalse>
+ { typedef _Iffalse __type; };
+ template<typename _Tp>
+ struct __add_unsigned
+ {
+ private:
+ typedef __enable_if<std::__is_integer<_Tp>::__value, _Tp> __if_type;
+ public:
+ typedef typename __if_type::__type __type;
+ };
+ template<>
+ struct __add_unsigned<char>
+ { typedef unsigned char __type; };
+ template<>
+ struct __add_unsigned<signed char>
+ { typedef unsigned char __type; };
+ template<>
+ struct __add_unsigned<short>
+ { typedef unsigned short __type; };
+ template<>
+ struct __add_unsigned<int>
+ { typedef unsigned int __type; };
+ template<>
+ struct __add_unsigned<long>
+ { typedef unsigned long __type; };
+ template<>
+ struct __add_unsigned<long long>
+ { typedef unsigned long long __type; };
+ template<>
+ struct __add_unsigned<bool>;
+ template<>
+ struct __add_unsigned<wchar_t>;
+ template<typename _Tp>
+ struct __remove_unsigned
+ {
+ private:
+ typedef __enable_if<std::__is_integer<_Tp>::__value, _Tp> __if_type;
+ public:
+ typedef typename __if_type::__type __type;
+ };
+ template<>
+ struct __remove_unsigned<char>
+ { typedef signed char __type; };
+ template<>
+ struct __remove_unsigned<unsigned char>
+ { typedef signed char __type; };
+ template<>
+ struct __remove_unsigned<unsigned short>
+ { typedef short __type; };
+ template<>
+ struct __remove_unsigned<unsigned int>
+ { typedef int __type; };
+ template<>
+ struct __remove_unsigned<unsigned long>
+ { typedef long __type; };
+ template<>
+ struct __remove_unsigned<unsigned long long>
+ { typedef long long __type; };
+ template<>
+ struct __remove_unsigned<bool>;
+ template<>
+ struct __remove_unsigned<wchar_t>;
+ template<typename _Type>
+ inline bool
+ __is_null_pointer(_Type* __ptr)
+ { return __ptr == 0; }
+ template<typename _Type>
+ inline bool
+ __is_null_pointer(_Type)
+ { return false; }
+ template<typename _Tp, bool = std::__is_integer<_Tp>::__value>
+ struct __promote
+ { typedef double __type; };
+ template<typename _Tp>
+ struct __promote<_Tp, false>
+ { typedef _Tp __type; };
+ template<typename _Tp, typename _Up>
+ struct __promote_2
+ {
+ private:
+ typedef typename __promote<_Tp>::__type __type1;
+ typedef typename __promote<_Up>::__type __type2;
+ public:
+ typedef __typeof__(__type1() + __type2()) __type;
+ };
+ template<typename _Tp, typename _Up, typename _Vp>
+ struct __promote_3
+ {
+ private:
+ typedef typename __promote<_Tp>::__type __type1;
+ typedef typename __promote<_Up>::__type __type2;
+ typedef typename __promote<_Vp>::__type __type3;
+ public:
+ typedef __typeof__(__type1() + __type2() + __type3()) __type;
+ };
+ template<typename _Tp, typename _Up, typename _Vp, typename _Wp>
+ struct __promote_4
+ {
+ private:
+ typedef typename __promote<_Tp>::__type __type1;
+ typedef typename __promote<_Up>::__type __type2;
+ typedef typename __promote<_Vp>::__type __type3;
+ typedef typename __promote<_Wp>::__type __type4;
+ public:
+ typedef __typeof__(__type1() + __type2() + __type3() + __type4()) __type;
+ };
+}
+
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Value>
+ struct __numeric_traits_integer
+ {
+ static const _Value __min = (((_Value)(-1) < 0) ? (_Value)1 << (sizeof(_Value) * 8 - ((_Value)(-1) < 0)) : (_Value)0);
+ static const _Value __max = (((_Value)(-1) < 0) ? (((((_Value)1 << ((sizeof(_Value) * 8 - ((_Value)(-1) < 0)) - 1)) - 1) << 1) + 1) : ~(_Value)0);
+ static const bool __is_signed = ((_Value)(-1) < 0);
+ static const int __digits = (sizeof(_Value) * 8 - ((_Value)(-1) < 0));
+ };
+ template<typename _Value>
+ const _Value __numeric_traits_integer<_Value>::__min;
+ template<typename _Value>
+ const _Value __numeric_traits_integer<_Value>::__max;
+ template<typename _Value>
+ const bool __numeric_traits_integer<_Value>::__is_signed;
+ template<typename _Value>
+ const int __numeric_traits_integer<_Value>::__digits;
+ template<typename _Value>
+ struct __numeric_traits_floating
+ {
+ static const int __max_digits10 = (2 + (std::__are_same<_Value, float>::__value ? 24 : std::__are_same<_Value, double>::__value ? 53 : 64) * 3010 / 10000);
+ static const bool __is_signed = true;
+ static const int __digits10 = (std::__are_same<_Value, float>::__value ? 6 : std::__are_same<_Value, double>::__value ? 15 : 18);
+ static const int __max_exponent10 = (std::__are_same<_Value, float>::__value ? 38 : std::__are_same<_Value, double>::__value ? 308 : 4932);
+ };
+ template<typename _Value>
+ const int __numeric_traits_floating<_Value>::__max_digits10;
+ template<typename _Value>
+ const bool __numeric_traits_floating<_Value>::__is_signed;
+ template<typename _Value>
+ const int __numeric_traits_floating<_Value>::__digits10;
+ template<typename _Value>
+ const int __numeric_traits_floating<_Value>::__max_exponent10;
+ template<typename _Value>
+ struct __numeric_traits
+ : public __conditional_type<std::__is_integer<_Value>::__value,
+ __numeric_traits_integer<_Value>,
+ __numeric_traits_floating<_Value> >::__type
+ { };
+}
+
+
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Tp>
+ inline void
+ swap(_Tp& __a, _Tp& __b)
+ {
+
+ _Tp __tmp = (__a);
+ __a = (__b);
+ __b = (__tmp);
+ }
+ template<typename _Tp, size_t _Nm>
+ inline void
+ swap(_Tp (&__a)[_Nm], _Tp (&__b)[_Nm])
+ {
+ for (size_t __n = 0; __n < _Nm; ++__n)
+ swap(__a[__n], __b[__n]);
+ }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<class _T1, class _T2>
+ struct pair
+ {
+ typedef _T1 first_type;
+ typedef _T2 second_type;
+ _T1 first;
+ _T2 second;
+ pair()
+ : first(), second() { }
+ pair(const _T1& __a, const _T2& __b)
+ : first(__a), second(__b) { }
+ template<class _U1, class _U2>
+ pair(const pair<_U1, _U2>& __p)
+ : first(__p.first),
+ second(__p.second) { }
+ };
+ template<class _T1, class _T2>
+ inline bool
+ operator==(const pair<_T1, _T2>& __x, const pair<_T1, _T2>& __y)
+ { return __x.first == __y.first && __x.second == __y.second; }
+ template<class _T1, class _T2>
+ inline bool
+ operator<(const pair<_T1, _T2>& __x, const pair<_T1, _T2>& __y)
+ { return __x.first < __y.first
+ || (!(__y.first < __x.first) && __x.second < __y.second); }
+ template<class _T1, class _T2>
+ inline bool
+ operator!=(const pair<_T1, _T2>& __x, const pair<_T1, _T2>& __y)
+ { return !(__x == __y); }
+ template<class _T1, class _T2>
+ inline bool
+ operator>(const pair<_T1, _T2>& __x, const pair<_T1, _T2>& __y)
+ { return __y < __x; }
+ template<class _T1, class _T2>
+ inline bool
+ operator<=(const pair<_T1, _T2>& __x, const pair<_T1, _T2>& __y)
+ { return !(__y < __x); }
+ template<class _T1, class _T2>
+ inline bool
+ operator>=(const pair<_T1, _T2>& __x, const pair<_T1, _T2>& __y)
+ { return !(__x < __y); }
+ template<class _T1, class _T2>
+ inline pair<_T1, _T2>
+ make_pair(_T1 __x, _T2 __y)
+ { return pair<_T1, _T2>(__x, __y); }
+}
+
+
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ struct input_iterator_tag { };
+ struct output_iterator_tag { };
+ struct forward_iterator_tag : public input_iterator_tag { };
+ struct bidirectional_iterator_tag : public forward_iterator_tag { };
+ struct random_access_iterator_tag : public bidirectional_iterator_tag { };
+ template<typename _Category, typename _Tp, typename _Distance = ptrdiff_t,
+ typename _Pointer = _Tp*, typename _Reference = _Tp&>
+ struct iterator
+ {
+ typedef _Category iterator_category;
+ typedef _Tp value_type;
+ typedef _Distance difference_type;
+ typedef _Pointer pointer;
+ typedef _Reference reference;
+ };
+ template<typename _Iterator>
+ struct iterator_traits
+ {
+ typedef typename _Iterator::iterator_category iterator_category;
+ typedef typename _Iterator::value_type value_type;
+ typedef typename _Iterator::difference_type difference_type;
+ typedef typename _Iterator::pointer pointer;
+ typedef typename _Iterator::reference reference;
+ };
+ template<typename _Tp>
+ struct iterator_traits<_Tp*>
+ {
+ typedef random_access_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef _Tp& reference;
+ };
+ template<typename _Tp>
+ struct iterator_traits<const _Tp*>
+ {
+ typedef random_access_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef const _Tp* pointer;
+ typedef const _Tp& reference;
+ };
+ template<typename _Iter>
+ inline typename iterator_traits<_Iter>::iterator_category
+ __iterator_category(const _Iter&)
+ { return typename iterator_traits<_Iter>::iterator_category(); }
+}
+
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _InputIterator>
+ inline typename iterator_traits<_InputIterator>::difference_type
+ __distance(_InputIterator __first, _InputIterator __last,
+ input_iterator_tag)
+ {
+
+ typename iterator_traits<_InputIterator>::difference_type __n = 0;
+ while (__first != __last)
+ {
+ ++__first;
+ ++__n;
+ }
+ return __n;
+ }
+ template<typename _RandomAccessIterator>
+ inline typename iterator_traits<_RandomAccessIterator>::difference_type
+ __distance(_RandomAccessIterator __first, _RandomAccessIterator __last,
+ random_access_iterator_tag)
+ {
+
+ return __last - __first;
+ }
+ template<typename _InputIterator>
+ inline typename iterator_traits<_InputIterator>::difference_type
+ distance(_InputIterator __first, _InputIterator __last)
+ {
+ return std::__distance(__first, __last,
+ std::__iterator_category(__first));
+ }
+ template<typename _InputIterator, typename _Distance>
+ inline void
+ __advance(_InputIterator& __i, _Distance __n, input_iterator_tag)
+ {
+
+ while (__n--)
+ ++__i;
+ }
+ template<typename _BidirectionalIterator, typename _Distance>
+ inline void
+ __advance(_BidirectionalIterator& __i, _Distance __n,
+ bidirectional_iterator_tag)
+ {
+
+ if (__n > 0)
+ while (__n--)
+ ++__i;
+ else
+ while (__n++)
+ --__i;
+ }
+ template<typename _RandomAccessIterator, typename _Distance>
+ inline void
+ __advance(_RandomAccessIterator& __i, _Distance __n,
+ random_access_iterator_tag)
+ {
+
+ __i += __n;
+ }
+ template<typename _InputIterator, typename _Distance>
+ inline void
+ advance(_InputIterator& __i, _Distance __n)
+ {
+ typename iterator_traits<_InputIterator>::difference_type __d = __n;
+ std::__advance(__i, __d, std::__iterator_category(__i));
+ }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Iterator>
+ class reverse_iterator
+ : public iterator<typename iterator_traits<_Iterator>::iterator_category,
+ typename iterator_traits<_Iterator>::value_type,
+ typename iterator_traits<_Iterator>::difference_type,
+ typename iterator_traits<_Iterator>::pointer,
+ typename iterator_traits<_Iterator>::reference>
+ {
+ protected:
+ _Iterator current;
+ public:
+ typedef _Iterator iterator_type;
+ typedef typename iterator_traits<_Iterator>::difference_type
+ difference_type;
+ typedef typename iterator_traits<_Iterator>::reference reference;
+ typedef typename iterator_traits<_Iterator>::pointer pointer;
+ public:
+ reverse_iterator() : current() { }
+ explicit
+ reverse_iterator(iterator_type __x) : current(__x) { }
+ reverse_iterator(const reverse_iterator& __x)
+ : current(__x.current) { }
+ template<typename _Iter>
+ reverse_iterator(const reverse_iterator<_Iter>& __x)
+ : current(__x.base()) { }
+ iterator_type
+ base() const
+ { return current; }
+ reference
+ operator*() const
+ {
+ _Iterator __tmp = current;
+ return *--__tmp;
+ }
+ pointer
+ operator->() const
+ { return &(operator*()); }
+ reverse_iterator&
+ operator++()
+ {
+ --current;
+ return *this;
+ }
+ reverse_iterator
+ operator++(int)
+ {
+ reverse_iterator __tmp = *this;
+ --current;
+ return __tmp;
+ }
+ reverse_iterator&
+ operator--()
+ {
+ ++current;
+ return *this;
+ }
+ reverse_iterator
+ operator--(int)
+ {
+ reverse_iterator __tmp = *this;
+ ++current;
+ return __tmp;
+ }
+ reverse_iterator
+ operator+(difference_type __n) const
+ { return reverse_iterator(current - __n); }
+ reverse_iterator&
+ operator+=(difference_type __n)
+ {
+ current -= __n;
+ return *this;
+ }
+ reverse_iterator
+ operator-(difference_type __n) const
+ { return reverse_iterator(current + __n); }
+ reverse_iterator&
+ operator-=(difference_type __n)
+ {
+ current += __n;
+ return *this;
+ }
+ reference
+ operator[](difference_type __n) const
+ { return *(*this + __n); }
+ };
+ template<typename _Iterator>
+ inline bool
+ operator==(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return __x.base() == __y.base(); }
+ template<typename _Iterator>
+ inline bool
+ operator<(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return __y.base() < __x.base(); }
+ template<typename _Iterator>
+ inline bool
+ operator!=(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return !(__x == __y); }
+ template<typename _Iterator>
+ inline bool
+ operator>(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return __y < __x; }
+ template<typename _Iterator>
+ inline bool
+ operator<=(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return !(__y < __x); }
+ template<typename _Iterator>
+ inline bool
+ operator>=(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return !(__x < __y); }
+ template<typename _Iterator>
+ inline typename reverse_iterator<_Iterator>::difference_type
+ operator-(const reverse_iterator<_Iterator>& __x,
+ const reverse_iterator<_Iterator>& __y)
+ { return __y.base() - __x.base(); }
+ template<typename _Iterator>
+ inline reverse_iterator<_Iterator>
+ operator+(typename reverse_iterator<_Iterator>::difference_type __n,
+ const reverse_iterator<_Iterator>& __x)
+ { return reverse_iterator<_Iterator>(__x.base() - __n); }
+ template<typename _IteratorL, typename _IteratorR>
+ inline bool
+ operator==(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return __x.base() == __y.base(); }
+ template<typename _IteratorL, typename _IteratorR>
+ inline bool
+ operator<(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return __y.base() < __x.base(); }
+ template<typename _IteratorL, typename _IteratorR>
+ inline bool
+ operator!=(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return !(__x == __y); }
+ template<typename _IteratorL, typename _IteratorR>
+ inline bool
+ operator>(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return __y < __x; }
+ template<typename _IteratorL, typename _IteratorR>
+ inline bool
+ operator<=(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return !(__y < __x); }
+ template<typename _IteratorL, typename _IteratorR>
+ inline bool
+ operator>=(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return !(__x < __y); }
+ template<typename _IteratorL, typename _IteratorR>
+ inline typename reverse_iterator<_IteratorL>::difference_type
+ operator-(const reverse_iterator<_IteratorL>& __x,
+ const reverse_iterator<_IteratorR>& __y)
+ { return __y.base() - __x.base(); }
+ template<typename _Container>
+ class back_insert_iterator
+ : public iterator<output_iterator_tag, void, void, void, void>
+ {
+ protected:
+ _Container* container;
+ public:
+ typedef _Container container_type;
+ explicit
+ back_insert_iterator(_Container& __x) : container(&__x) { }
+ back_insert_iterator&
+ operator=(typename _Container::const_reference __value)
+ {
+ container->push_back(__value);
+ return *this;
+ }
+ back_insert_iterator&
+ operator*()
+ { return *this; }
+ back_insert_iterator&
+ operator++()
+ { return *this; }
+ back_insert_iterator
+ operator++(int)
+ { return *this; }
+ };
+ template<typename _Container>
+ inline back_insert_iterator<_Container>
+ back_inserter(_Container& __x)
+ { return back_insert_iterator<_Container>(__x); }
+ template<typename _Container>
+ class front_insert_iterator
+ : public iterator<output_iterator_tag, void, void, void, void>
+ {
+ protected:
+ _Container* container;
+ public:
+ typedef _Container container_type;
+ explicit front_insert_iterator(_Container& __x) : container(&__x) { }
+ front_insert_iterator&
+ operator=(typename _Container::const_reference __value)
+ {
+ container->push_front(__value);
+ return *this;
+ }
+ front_insert_iterator&
+ operator*()
+ { return *this; }
+ front_insert_iterator&
+ operator++()
+ { return *this; }
+ front_insert_iterator
+ operator++(int)
+ { return *this; }
+ };
+ template<typename _Container>
+ inline front_insert_iterator<_Container>
+ front_inserter(_Container& __x)
+ { return front_insert_iterator<_Container>(__x); }
+ template<typename _Container>
+ class insert_iterator
+ : public iterator<output_iterator_tag, void, void, void, void>
+ {
+ protected:
+ _Container* container;
+ typename _Container::iterator iter;
+ public:
+ typedef _Container container_type;
+ insert_iterator(_Container& __x, typename _Container::iterator __i)
+ : container(&__x), iter(__i) {}
+ insert_iterator&
+ operator=(typename _Container::const_reference __value)
+ {
+ iter = container->insert(iter, __value);
+ ++iter;
+ return *this;
+ }
+ insert_iterator&
+ operator*()
+ { return *this; }
+ insert_iterator&
+ operator++()
+ { return *this; }
+ insert_iterator&
+ operator++(int)
+ { return *this; }
+ };
+ template<typename _Container, typename _Iterator>
+ inline insert_iterator<_Container>
+ inserter(_Container& __x, _Iterator __i)
+ {
+ return insert_iterator<_Container>(__x,
+ typename _Container::iterator(__i));
+ }
+}
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ using std::iterator_traits;
+ using std::iterator;
+ template<typename _Iterator, typename _Container>
+ class __normal_iterator
+ {
+ protected:
+ _Iterator _M_current;
+ public:
+ typedef _Iterator iterator_type;
+ typedef typename iterator_traits<_Iterator>::iterator_category
+ iterator_category;
+ typedef typename iterator_traits<_Iterator>::value_type value_type;
+ typedef typename iterator_traits<_Iterator>::difference_type
+ difference_type;
+ typedef typename iterator_traits<_Iterator>::reference reference;
+ typedef typename iterator_traits<_Iterator>::pointer pointer;
+ __normal_iterator() : _M_current(_Iterator()) { }
+ explicit
+ __normal_iterator(const _Iterator& __i) : _M_current(__i) { }
+ template<typename _Iter>
+ __normal_iterator(const __normal_iterator<_Iter,
+ typename __enable_if<
+ (std::__are_same<_Iter, typename _Container::pointer>::__value),
+ _Container>::__type>& __i)
+ : _M_current(__i.base()) { }
+ reference
+ operator*() const
+ { return *_M_current; }
+ pointer
+ operator->() const
+ { return _M_current; }
+ __normal_iterator&
+ operator++()
+ {
+ ++_M_current;
+ return *this;
+ }
+ __normal_iterator
+ operator++(int)
+ { return __normal_iterator(_M_current++); }
+ __normal_iterator&
+ operator--()
+ {
+ --_M_current;
+ return *this;
+ }
+ __normal_iterator
+ operator--(int)
+ { return __normal_iterator(_M_current--); }
+ reference
+ operator[](const difference_type& __n) const
+ { return _M_current[__n]; }
+ __normal_iterator&
+ operator+=(const difference_type& __n)
+ { _M_current += __n; return *this; }
+ __normal_iterator
+ operator+(const difference_type& __n) const
+ { return __normal_iterator(_M_current + __n); }
+ __normal_iterator&
+ operator-=(const difference_type& __n)
+ { _M_current -= __n; return *this; }
+ __normal_iterator
+ operator-(const difference_type& __n) const
+ { return __normal_iterator(_M_current - __n); }
+ const _Iterator&
+ base() const
+ { return _M_current; }
+ };
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline bool
+ operator==(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() == __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline bool
+ operator==(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() == __rhs.base(); }
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline bool
+ operator!=(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() != __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline bool
+ operator!=(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() != __rhs.base(); }
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline bool
+ operator<(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() < __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline bool
+ operator<(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() < __rhs.base(); }
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline bool
+ operator>(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() > __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline bool
+ operator>(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() > __rhs.base(); }
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline bool
+ operator<=(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() <= __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline bool
+ operator<=(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() <= __rhs.base(); }
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline bool
+ operator>=(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() >= __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline bool
+ operator>=(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() >= __rhs.base(); }
+ template<typename _IteratorL, typename _IteratorR, typename _Container>
+ inline typename __normal_iterator<_IteratorL, _Container>::difference_type
+ operator-(const __normal_iterator<_IteratorL, _Container>& __lhs,
+ const __normal_iterator<_IteratorR, _Container>& __rhs)
+ { return __lhs.base() - __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline typename __normal_iterator<_Iterator, _Container>::difference_type
+ operator-(const __normal_iterator<_Iterator, _Container>& __lhs,
+ const __normal_iterator<_Iterator, _Container>& __rhs)
+ { return __lhs.base() - __rhs.base(); }
+ template<typename _Iterator, typename _Container>
+ inline __normal_iterator<_Iterator, _Container>
+ operator+(typename __normal_iterator<_Iterator, _Container>::difference_type
+ __n, const __normal_iterator<_Iterator, _Container>& __i)
+ { return __normal_iterator<_Iterator, _Container>(__i.base() + __n); }
+}
+namespace std
+{
+ namespace __debug { }
+}
+namespace __gnu_debug
+{
+ using namespace std::__debug;
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<bool _BoolType>
+ struct __iter_swap
+ {
+ template<typename _ForwardIterator1, typename _ForwardIterator2>
+ static void
+ iter_swap(_ForwardIterator1 __a, _ForwardIterator2 __b)
+ {
+ typedef typename iterator_traits<_ForwardIterator1>::value_type
+ _ValueType1;
+ _ValueType1 __tmp = (*__a);
+ *__a = (*__b);
+ *__b = (__tmp);
+ }
+ };
+ template<>
+ struct __iter_swap<true>
+ {
+ template<typename _ForwardIterator1, typename _ForwardIterator2>
+ static void
+ iter_swap(_ForwardIterator1 __a, _ForwardIterator2 __b)
+ {
+ swap(*__a, *__b);
+ }
+ };
+ template<typename _ForwardIterator1, typename _ForwardIterator2>
+ inline void
+ iter_swap(_ForwardIterator1 __a, _ForwardIterator2 __b)
+ {
+ typedef typename iterator_traits<_ForwardIterator1>::value_type
+ _ValueType1;
+ typedef typename iterator_traits<_ForwardIterator2>::value_type
+ _ValueType2;
+
+
+
+
+ typedef typename iterator_traits<_ForwardIterator1>::reference
+ _ReferenceType1;
+ typedef typename iterator_traits<_ForwardIterator2>::reference
+ _ReferenceType2;
+ std::__iter_swap<__are_same<_ValueType1, _ValueType2>::__value
+ && __are_same<_ValueType1&, _ReferenceType1>::__value
+ && __are_same<_ValueType2&, _ReferenceType2>::__value>::
+ iter_swap(__a, __b);
+ }
+ template<typename _ForwardIterator1, typename _ForwardIterator2>
+ _ForwardIterator2
+ swap_ranges(_ForwardIterator1 __first1, _ForwardIterator1 __last1,
+ _ForwardIterator2 __first2)
+ {
+
+
+ ;
+ for (; __first1 != __last1; ++__first1, ++__first2)
+ std::iter_swap(__first1, __first2);
+ return __first2;
+ }
+ template<typename _Tp>
+ inline const _Tp&
+ min(const _Tp& __a, const _Tp& __b)
+ {
+
+ if (__b < __a)
+ return __b;
+ return __a;
+ }
+ template<typename _Tp>
+ inline const _Tp&
+ max(const _Tp& __a, const _Tp& __b)
+ {
+
+ if (__a < __b)
+ return __b;
+ return __a;
+ }
+ template<typename _Tp, typename _Compare>
+ inline const _Tp&
+ min(const _Tp& __a, const _Tp& __b, _Compare __comp)
+ {
+ if (__comp(__b, __a))
+ return __b;
+ return __a;
+ }
+ template<typename _Tp, typename _Compare>
+ inline const _Tp&
+ max(const _Tp& __a, const _Tp& __b, _Compare __comp)
+ {
+ if (__comp(__a, __b))
+ return __b;
+ return __a;
+ }
+ template<typename _Iterator,
+ bool _IsNormal = __is_normal_iterator<_Iterator>::__value>
+ struct __niter_base
+ {
+ static _Iterator
+ __b(_Iterator __it)
+ { return __it; }
+ };
+ template<typename _Iterator>
+ struct __niter_base<_Iterator, true>
+ {
+ static typename _Iterator::iterator_type
+ __b(_Iterator __it)
+ { return __it.base(); }
+ };
+ template<typename _Iterator,
+ bool _IsMove = __is_move_iterator<_Iterator>::__value>
+ struct __miter_base
+ {
+ static _Iterator
+ __b(_Iterator __it)
+ { return __it; }
+ };
+ template<typename _Iterator>
+ struct __miter_base<_Iterator, true>
+ {
+ static typename _Iterator::iterator_type
+ __b(_Iterator __it)
+ { return __it.base(); }
+ };
+ template<bool, bool, typename>
+ struct __copy_move
+ {
+ template<typename _II, typename _OI>
+ static _OI
+ __copy_m(_II __first, _II __last, _OI __result)
+ {
+ for (; __first != __last; ++__result, ++__first)
+ *__result = *__first;
+ return __result;
+ }
+ };
+ template<>
+ struct __copy_move<false, false, random_access_iterator_tag>
+ {
+ template<typename _II, typename _OI>
+ static _OI
+ __copy_m(_II __first, _II __last, _OI __result)
+ {
+ typedef typename iterator_traits<_II>::difference_type _Distance;
+ for(_Distance __n = __last - __first; __n > 0; --__n)
+ {
+ *__result = *__first;
+ ++__first;
+ ++__result;
+ }
+ return __result;
+ }
+ };
+ template<bool _IsMove>
+ struct __copy_move<_IsMove, true, random_access_iterator_tag>
+ {
+ template<typename _Tp>
+ static _Tp*
+ __copy_m(const _Tp* __first, const _Tp* __last, _Tp* __result)
+ {
+ __builtin_memmove(__result, __first,
+ sizeof(_Tp) * (__last - __first));
+ return __result + (__last - __first);
+ }
+ };
+ template<bool _IsMove, typename _II, typename _OI>
+ inline _OI
+ __copy_move_a(_II __first, _II __last, _OI __result)
+ {
+ typedef typename iterator_traits<_II>::value_type _ValueTypeI;
+ typedef typename iterator_traits<_OI>::value_type _ValueTypeO;
+ typedef typename iterator_traits<_II>::iterator_category _Category;
+ const bool __simple = (__is_pod(_ValueTypeI)
+ && __is_pointer<_II>::__value
+ && __is_pointer<_OI>::__value
+ && __are_same<_ValueTypeI, _ValueTypeO>::__value);
+ return std::__copy_move<_IsMove, __simple,
+ _Category>::__copy_m(__first, __last, __result);
+ }
+ template<typename _CharT>
+ struct char_traits;
+ template<typename _CharT, typename _Traits>
+ class istreambuf_iterator;
+ template<typename _CharT, typename _Traits>
+ class ostreambuf_iterator;
+ template<bool _IsMove, typename _CharT>
+ typename __gnu_cxx::__enable_if<__is_char<_CharT>::__value,
+ ostreambuf_iterator<_CharT, char_traits<_CharT> > >::__type
+ __copy_move_a2(_CharT*, _CharT*,
+ ostreambuf_iterator<_CharT, char_traits<_CharT> >);
+ template<bool _IsMove, typename _CharT>
+ typename __gnu_cxx::__enable_if<__is_char<_CharT>::__value,
+ ostreambuf_iterator<_CharT, char_traits<_CharT> > >::__type
+ __copy_move_a2(const _CharT*, const _CharT*,
+ ostreambuf_iterator<_CharT, char_traits<_CharT> >);
+ template<bool _IsMove, typename _CharT>
+ typename __gnu_cxx::__enable_if<__is_char<_CharT>::__value,
+ _CharT*>::__type
+ __copy_move_a2(istreambuf_iterator<_CharT, char_traits<_CharT> >,
+ istreambuf_iterator<_CharT, char_traits<_CharT> >, _CharT*);
+ template<bool _IsMove, typename _II, typename _OI>
+ inline _OI
+ __copy_move_a2(_II __first, _II __last, _OI __result)
+ {
+ return _OI(std::__copy_move_a<_IsMove>
+ (std::__niter_base<_II>::__b(__first),
+ std::__niter_base<_II>::__b(__last),
+ std::__niter_base<_OI>::__b(__result)));
+ }
+ template<typename _II, typename _OI>
+ inline _OI
+ copy(_II __first, _II __last, _OI __result)
+ {
+
+
+ ;
+ return (std::__copy_move_a2<__is_move_iterator<_II>::__value>
+ (std::__miter_base<_II>::__b(__first),
+ std::__miter_base<_II>::__b(__last), __result));
+ }
+ template<bool, bool, typename>
+ struct __copy_move_backward
+ {
+ template<typename _BI1, typename _BI2>
+ static _BI2
+ __copy_move_b(_BI1 __first, _BI1 __last, _BI2 __result)
+ {
+ while (__first != __last)
+ *--__result = *--__last;
+ return __result;
+ }
+ };
+ template<>
+ struct __copy_move_backward<false, false, random_access_iterator_tag>
+ {
+ template<typename _BI1, typename _BI2>
+ static _BI2
+ __copy_move_b(_BI1 __first, _BI1 __last, _BI2 __result)
+ {
+ typename iterator_traits<_BI1>::difference_type __n;
+ for (__n = __last - __first; __n > 0; --__n)
+ *--__result = *--__last;
+ return __result;
+ }
+ };
+ template<bool _IsMove>
+ struct __copy_move_backward<_IsMove, true, random_access_iterator_tag>
+ {
+ template<typename _Tp>
+ static _Tp*
+ __copy_move_b(const _Tp* __first, const _Tp* __last, _Tp* __result)
+ {
+ const ptrdiff_t _Num = __last - __first;
+ __builtin_memmove(__result - _Num, __first, sizeof(_Tp) * _Num);
+ return __result - _Num;
+ }
+ };
+ template<bool _IsMove, typename _BI1, typename _BI2>
+ inline _BI2
+ __copy_move_backward_a(_BI1 __first, _BI1 __last, _BI2 __result)
+ {
+ typedef typename iterator_traits<_BI1>::value_type _ValueType1;
+ typedef typename iterator_traits<_BI2>::value_type _ValueType2;
+ typedef typename iterator_traits<_BI1>::iterator_category _Category;
+ const bool __simple = (__is_pod(_ValueType1)
+ && __is_pointer<_BI1>::__value
+ && __is_pointer<_BI2>::__value
+ && __are_same<_ValueType1, _ValueType2>::__value);
+ return std::__copy_move_backward<_IsMove, __simple,
+ _Category>::__copy_move_b(__first,
+ __last,
+ __result);
+ }
+ template<bool _IsMove, typename _BI1, typename _BI2>
+ inline _BI2
+ __copy_move_backward_a2(_BI1 __first, _BI1 __last, _BI2 __result)
+ {
+ return _BI2(std::__copy_move_backward_a<_IsMove>
+ (std::__niter_base<_BI1>::__b(__first),
+ std::__niter_base<_BI1>::__b(__last),
+ std::__niter_base<_BI2>::__b(__result)));
+ }
+ template<typename _BI1, typename _BI2>
+ inline _BI2
+ copy_backward(_BI1 __first, _BI1 __last, _BI2 __result)
+ {
+
+
+
+ ;
+ return (std::__copy_move_backward_a2<__is_move_iterator<_BI1>::__value>
+ (std::__miter_base<_BI1>::__b(__first),
+ std::__miter_base<_BI1>::__b(__last), __result));
+ }
+ template<typename _ForwardIterator, typename _Tp>
+ inline typename
+ __gnu_cxx::__enable_if<!__is_scalar<_Tp>::__value, void>::__type
+ __fill_a(_ForwardIterator __first, _ForwardIterator __last,
+ const _Tp& __value)
+ {
+ for (; __first != __last; ++__first)
+ *__first = __value;
+ }
+ template<typename _ForwardIterator, typename _Tp>
+ inline typename
+ __gnu_cxx::__enable_if<__is_scalar<_Tp>::__value, void>::__type
+ __fill_a(_ForwardIterator __first, _ForwardIterator __last,
+ const _Tp& __value)
+ {
+ const _Tp __tmp = __value;
+ for (; __first != __last; ++__first)
+ *__first = __tmp;
+ }
+ template<typename _Tp>
+ inline typename
+ __gnu_cxx::__enable_if<__is_byte<_Tp>::__value, void>::__type
+ __fill_a(_Tp* __first, _Tp* __last, const _Tp& __c)
+ {
+ const _Tp __tmp = __c;
+ __builtin_memset(__first, static_cast<unsigned char>(__tmp),
+ __last - __first);
+ }
+ template<typename _ForwardIterator, typename _Tp>
+ inline void
+ fill(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value)
+ {
+
+ ;
+ std::__fill_a(std::__niter_base<_ForwardIterator>::__b(__first),
+ std::__niter_base<_ForwardIterator>::__b(__last), __value);
+ }
+ template<typename _OutputIterator, typename _Size, typename _Tp>
+ inline typename
+ __gnu_cxx::__enable_if<!__is_scalar<_Tp>::__value, _OutputIterator>::__type
+ __fill_n_a(_OutputIterator __first, _Size __n, const _Tp& __value)
+ {
+ for (; __n > 0; --__n, ++__first)
+ *__first = __value;
+ return __first;
+ }
+ template<typename _OutputIterator, typename _Size, typename _Tp>
+ inline typename
+ __gnu_cxx::__enable_if<__is_scalar<_Tp>::__value, _OutputIterator>::__type
+ __fill_n_a(_OutputIterator __first, _Size __n, const _Tp& __value)
+ {
+ const _Tp __tmp = __value;
+ for (; __n > 0; --__n, ++__first)
+ *__first = __tmp;
+ return __first;
+ }
+ template<typename _Size, typename _Tp>
+ inline typename
+ __gnu_cxx::__enable_if<__is_byte<_Tp>::__value, _Tp*>::__type
+ __fill_n_a(_Tp* __first, _Size __n, const _Tp& __c)
+ {
+ std::__fill_a(__first, __first + __n, __c);
+ return __first + __n;
+ }
+ template<typename _OI, typename _Size, typename _Tp>
+ inline _OI
+ fill_n(_OI __first, _Size __n, const _Tp& __value)
+ {
+
+ return _OI(std::__fill_n_a(std::__niter_base<_OI>::__b(__first),
+ __n, __value));
+ }
+ template<bool _BoolType>
+ struct __equal
+ {
+ template<typename _II1, typename _II2>
+ static bool
+ equal(_II1 __first1, _II1 __last1, _II2 __first2)
+ {
+ for (; __first1 != __last1; ++__first1, ++__first2)
+ if (!(*__first1 == *__first2))
+ return false;
+ return true;
+ }
+ };
+ template<>
+ struct __equal<true>
+ {
+ template<typename _Tp>
+ static bool
+ equal(const _Tp* __first1, const _Tp* __last1, const _Tp* __first2)
+ {
+ return !__builtin_memcmp(__first1, __first2, sizeof(_Tp)
+ * (__last1 - __first1));
+ }
+ };
+ template<typename _II1, typename _II2>
+ inline bool
+ __equal_aux(_II1 __first1, _II1 __last1, _II2 __first2)
+ {
+ typedef typename iterator_traits<_II1>::value_type _ValueType1;
+ typedef typename iterator_traits<_II2>::value_type _ValueType2;
+ const bool __simple = (__is_integer<_ValueType1>::__value
+ && __is_pointer<_II1>::__value
+ && __is_pointer<_II2>::__value
+ && __are_same<_ValueType1, _ValueType2>::__value);
+ return std::__equal<__simple>::equal(__first1, __last1, __first2);
+ }
+ template<typename, typename>
+ struct __lc_rai
+ {
+ template<typename _II1, typename _II2>
+ static _II1
+ __newlast1(_II1, _II1 __last1, _II2, _II2)
+ { return __last1; }
+ template<typename _II>
+ static bool
+ __cnd2(_II __first, _II __last)
+ { return __first != __last; }
+ };
+ template<>
+ struct __lc_rai<random_access_iterator_tag, random_access_iterator_tag>
+ {
+ template<typename _RAI1, typename _RAI2>
+ static _RAI1
+ __newlast1(_RAI1 __first1, _RAI1 __last1,
+ _RAI2 __first2, _RAI2 __last2)
+ {
+ const typename iterator_traits<_RAI1>::difference_type
+ __diff1 = __last1 - __first1;
+ const typename iterator_traits<_RAI2>::difference_type
+ __diff2 = __last2 - __first2;
+ return __diff2 < __diff1 ? __first1 + __diff2 : __last1;
+ }
+ template<typename _RAI>
+ static bool
+ __cnd2(_RAI, _RAI)
+ { return true; }
+ };
+ template<bool _BoolType>
+ struct __lexicographical_compare
+ {
+ template<typename _II1, typename _II2>
+ static bool __lc(_II1, _II1, _II2, _II2);
+ };
+ template<bool _BoolType>
+ template<typename _II1, typename _II2>
+ bool
+ __lexicographical_compare<_BoolType>::
+ __lc(_II1 __first1, _II1 __last1, _II2 __first2, _II2 __last2)
+ {
+ typedef typename iterator_traits<_II1>::iterator_category _Category1;
+ typedef typename iterator_traits<_II2>::iterator_category _Category2;
+ typedef std::__lc_rai<_Category1, _Category2> __rai_type;
+ __last1 = __rai_type::__newlast1(__first1, __last1,
+ __first2, __last2);
+ for (; __first1 != __last1 && __rai_type::__cnd2(__first2, __last2);
+ ++__first1, ++__first2)
+ {
+ if (*__first1 < *__first2)
+ return true;
+ if (*__first2 < *__first1)
+ return false;
+ }
+ return __first1 == __last1 && __first2 != __last2;
+ }
+ template<>
+ struct __lexicographical_compare<true>
+ {
+ template<typename _Tp, typename _Up>
+ static bool
+ __lc(const _Tp* __first1, const _Tp* __last1,
+ const _Up* __first2, const _Up* __last2)
+ {
+ const size_t __len1 = __last1 - __first1;
+ const size_t __len2 = __last2 - __first2;
+ const int __result = __builtin_memcmp(__first1, __first2,
+ std::min(__len1, __len2));
+ return __result != 0 ? __result < 0 : __len1 < __len2;
+ }
+ };
+ template<typename _II1, typename _II2>
+ inline bool
+ __lexicographical_compare_aux(_II1 __first1, _II1 __last1,
+ _II2 __first2, _II2 __last2)
+ {
+ typedef typename iterator_traits<_II1>::value_type _ValueType1;
+ typedef typename iterator_traits<_II2>::value_type _ValueType2;
+ const bool __simple =
+ (__is_byte<_ValueType1>::__value && __is_byte<_ValueType2>::__value
+ && !__gnu_cxx::__numeric_traits<_ValueType1>::__is_signed
+ && !__gnu_cxx::__numeric_traits<_ValueType2>::__is_signed
+ && __is_pointer<_II1>::__value
+ && __is_pointer<_II2>::__value);
+ return std::__lexicographical_compare<__simple>::__lc(__first1, __last1,
+ __first2, __last2);
+ }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _II1, typename _II2>
+ inline bool
+ equal(_II1 __first1, _II1 __last1, _II2 __first2)
+ {
+
+
+
+ ;
+ return std::__equal_aux(std::__niter_base<_II1>::__b(__first1),
+ std::__niter_base<_II1>::__b(__last1),
+ std::__niter_base<_II2>::__b(__first2));
+ }
+ template<typename _IIter1, typename _IIter2, typename _BinaryPredicate>
+ inline bool
+ equal(_IIter1 __first1, _IIter1 __last1,
+ _IIter2 __first2, _BinaryPredicate __binary_pred)
+ {
+
+
+ ;
+ for (; __first1 != __last1; ++__first1, ++__first2)
+ if (!bool(__binary_pred(*__first1, *__first2)))
+ return false;
+ return true;
+ }
+ template<typename _II1, typename _II2>
+ inline bool
+ lexicographical_compare(_II1 __first1, _II1 __last1,
+ _II2 __first2, _II2 __last2)
+ {
+ typedef typename iterator_traits<_II1>::value_type _ValueType1;
+ typedef typename iterator_traits<_II2>::value_type _ValueType2;
+
+
+
+
+ ;
+ ;
+ return std::__lexicographical_compare_aux
+ (std::__niter_base<_II1>::__b(__first1),
+ std::__niter_base<_II1>::__b(__last1),
+ std::__niter_base<_II2>::__b(__first2),
+ std::__niter_base<_II2>::__b(__last2));
+ }
+ template<typename _II1, typename _II2, typename _Compare>
+ bool
+ lexicographical_compare(_II1 __first1, _II1 __last1,
+ _II2 __first2, _II2 __last2, _Compare __comp)
+ {
+ typedef typename iterator_traits<_II1>::iterator_category _Category1;
+ typedef typename iterator_traits<_II2>::iterator_category _Category2;
+ typedef std::__lc_rai<_Category1, _Category2> __rai_type;
+
+
+ ;
+ ;
+ __last1 = __rai_type::__newlast1(__first1, __last1, __first2, __last2);
+ for (; __first1 != __last1 && __rai_type::__cnd2(__first2, __last2);
+ ++__first1, ++__first2)
+ {
+ if (__comp(*__first1, *__first2))
+ return true;
+ if (__comp(*__first2, *__first1))
+ return false;
+ }
+ return __first1 == __last1 && __first2 != __last2;
+ }
+ template<typename _InputIterator1, typename _InputIterator2>
+ pair<_InputIterator1, _InputIterator2>
+ mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
+ _InputIterator2 __first2)
+ {
+
+
+
+ ;
+ while (__first1 != __last1 && *__first1 == *__first2)
+ {
+ ++__first1;
+ ++__first2;
+ }
+ return pair<_InputIterator1, _InputIterator2>(__first1, __first2);
+ }
+ template<typename _InputIterator1, typename _InputIterator2,
+ typename _BinaryPredicate>
+ pair<_InputIterator1, _InputIterator2>
+ mismatch(_InputIterator1 __first1, _InputIterator1 __last1,
+ _InputIterator2 __first2, _BinaryPredicate __binary_pred)
+ {
+
+
+ ;
+ while (__first1 != __last1 && bool(__binary_pred(*__first1, *__first2)))
+ {
+ ++__first1;
+ ++__first2;
+ }
+ return pair<_InputIterator1, _InputIterator2>(__first1, __first2);
+ }
+}
+
+extern "C++" {
+namespace std
+{
+ class exception
+ {
+ public:
+ exception() throw() { }
+ virtual ~exception() throw();
+ virtual const char* what() const throw();
+ };
+ class bad_exception : public exception
+ {
+ public:
+ bad_exception() throw() { }
+ virtual ~bad_exception() throw();
+ virtual const char* what() const throw();
+ };
+ typedef void (*terminate_handler) ();
+ typedef void (*unexpected_handler) ();
+ terminate_handler set_terminate(terminate_handler) throw();
+ void terminate() __attribute__ ((__noreturn__));
+ unexpected_handler set_unexpected(unexpected_handler) throw();
+ void unexpected() __attribute__ ((__noreturn__));
+ bool uncaught_exception() throw();
+}
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ void __verbose_terminate_handler();
+}
+}
+extern "C++" {
+namespace std
+{
+ class bad_alloc : public exception
+ {
+ public:
+ bad_alloc() throw() { }
+ virtual ~bad_alloc() throw();
+ virtual const char* what() const throw();
+ };
+ struct nothrow_t { };
+ extern const nothrow_t nothrow;
+ typedef void (*new_handler)();
+ new_handler set_new_handler(new_handler) throw();
+}
+void* operator new(std::size_t) throw (std::bad_alloc);
+void* operator new[](std::size_t) throw (std::bad_alloc);
+void operator delete(void*) throw();
+void operator delete[](void*) throw();
+void* operator new(std::size_t, const std::nothrow_t&) throw();
+void* operator new[](std::size_t, const std::nothrow_t&) throw();
+void operator delete(void*, const std::nothrow_t&) throw();
+void operator delete[](void*, const std::nothrow_t&) throw();
+inline void* operator new(std::size_t, void* __p) throw() { return __p; }
+inline void* operator new[](std::size_t, void* __p) throw() { return __p; }
+inline void operator delete (void*, void*) throw() { }
+inline void operator delete[](void*, void*) throw() { }
+}
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ using std::size_t;
+ using std::ptrdiff_t;
+ template<typename _Tp>
+ class new_allocator
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+ template<typename _Tp1>
+ struct rebind
+ { typedef new_allocator<_Tp1> other; };
+ new_allocator() throw() { }
+ new_allocator(const new_allocator&) throw() { }
+ template<typename _Tp1>
+ new_allocator(const new_allocator<_Tp1>&) throw() { }
+ ~new_allocator() throw() { }
+ pointer
+ address(reference __x) const { return &__x; }
+ const_pointer
+ address(const_reference __x) const { return &__x; }
+ pointer
+ allocate(size_type __n, const void* = 0)
+ {
+ if (__builtin_expect(__n > this->max_size(), false))
+ std::__throw_bad_alloc();
+ return static_cast<_Tp*>(::operator new(__n * sizeof(_Tp)));
+ }
+ void
+ deallocate(pointer __p, size_type)
+ { ::operator delete(__p); }
+ size_type
+ max_size() const throw()
+ { return size_t(-1) / sizeof(_Tp); }
+ void
+ construct(pointer __p, const _Tp& __val)
+ { ::new((void *)__p) _Tp(__val); }
+ void
+ destroy(pointer __p) { __p->~_Tp(); }
+ };
+ template<typename _Tp>
+ inline bool
+ operator==(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
+ { return true; }
+ template<typename _Tp>
+ inline bool
+ operator!=(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
+ { return false; }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Tp>
+ class allocator;
+ template<>
+ class allocator<void>
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+ template<typename _Tp1>
+ struct rebind
+ { typedef allocator<_Tp1> other; };
+ };
+ template<typename _Tp>
+ class allocator: public __gnu_cxx::new_allocator<_Tp>
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+ template<typename _Tp1>
+ struct rebind
+ { typedef allocator<_Tp1> other; };
+ allocator() throw() { }
+ allocator(const allocator& __a) throw()
+ : __gnu_cxx::new_allocator<_Tp>(__a) { }
+ template<typename _Tp1>
+ allocator(const allocator<_Tp1>&) throw() { }
+ ~allocator() throw() { }
+ };
+ template<typename _T1, typename _T2>
+ inline bool
+ operator==(const allocator<_T1>&, const allocator<_T2>&)
+ { return true; }
+ template<typename _Tp>
+ inline bool
+ operator==(const allocator<_Tp>&, const allocator<_Tp>&)
+ { return true; }
+ template<typename _T1, typename _T2>
+ inline bool
+ operator!=(const allocator<_T1>&, const allocator<_T2>&)
+ { return false; }
+ template<typename _Tp>
+ inline bool
+ operator!=(const allocator<_Tp>&, const allocator<_Tp>&)
+ { return false; }
+ extern template class allocator<char>;
+ extern template class allocator<wchar_t>;
+ template<typename _Alloc, bool = __is_empty(_Alloc)>
+ struct __alloc_swap
+ { static void _S_do_it(_Alloc&, _Alloc&) { } };
+ template<typename _Alloc>
+ struct __alloc_swap<_Alloc, false>
+ {
+ static void
+ _S_do_it(_Alloc& __one, _Alloc& __two)
+ {
+ if (__one != __two)
+ swap(__one, __two);
+ }
+ };
+ template<typename _Alloc, bool = __is_empty(_Alloc)>
+ struct __alloc_neq
+ {
+ static bool
+ _S_do_it(const _Alloc&, const _Alloc&)
+ { return false; }
+ };
+ template<typename _Alloc>
+ struct __alloc_neq<_Alloc, false>
+ {
+ static bool
+ _S_do_it(const _Alloc& __one, const _Alloc& __two)
+ { return __one != __two; }
+ };
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ struct _List_node_base
+ {
+ _List_node_base* _M_next;
+ _List_node_base* _M_prev;
+ static void
+ swap(_List_node_base& __x, _List_node_base& __y);
+ void
+ transfer(_List_node_base * const __first,
+ _List_node_base * const __last);
+ void
+ reverse();
+ void
+ hook(_List_node_base * const __position);
+ void
+ unhook();
+ };
+ template<typename _Tp>
+ struct _List_node : public _List_node_base
+ {
+ _Tp _M_data;
+ };
+ template<typename _Tp>
+ struct _List_iterator
+ {
+ typedef _List_iterator<_Tp> _Self;
+ typedef _List_node<_Tp> _Node;
+ typedef ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef _Tp* pointer;
+ typedef _Tp& reference;
+ _List_iterator()
+ : _M_node() { }
+ explicit
+ _List_iterator(_List_node_base* __x)
+ : _M_node(__x) { }
+ reference
+ operator*() const
+ { return static_cast<_Node*>(_M_node)->_M_data; }
+ pointer
+ operator->() const
+ { return &static_cast<_Node*>(_M_node)->_M_data; }
+ _Self&
+ operator++()
+ {
+ _M_node = _M_node->_M_next;
+ return *this;
+ }
+ _Self
+ operator++(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_next;
+ return __tmp;
+ }
+ _Self&
+ operator--()
+ {
+ _M_node = _M_node->_M_prev;
+ return *this;
+ }
+ _Self
+ operator--(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_prev;
+ return __tmp;
+ }
+ bool
+ operator==(const _Self& __x) const
+ { return _M_node == __x._M_node; }
+ bool
+ operator!=(const _Self& __x) const
+ { return _M_node != __x._M_node; }
+ _List_node_base* _M_node;
+ };
+ template<typename _Tp>
+ struct _List_const_iterator
+ {
+ typedef _List_const_iterator<_Tp> _Self;
+ typedef const _List_node<_Tp> _Node;
+ typedef _List_iterator<_Tp> iterator;
+ typedef ptrdiff_t difference_type;
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef const _Tp* pointer;
+ typedef const _Tp& reference;
+ _List_const_iterator()
+ : _M_node() { }
+ explicit
+ _List_const_iterator(const _List_node_base* __x)
+ : _M_node(__x) { }
+ _List_const_iterator(const iterator& __x)
+ : _M_node(__x._M_node) { }
+ reference
+ operator*() const
+ { return static_cast<_Node*>(_M_node)->_M_data; }
+ pointer
+ operator->() const
+ { return &static_cast<_Node*>(_M_node)->_M_data; }
+ _Self&
+ operator++()
+ {
+ _M_node = _M_node->_M_next;
+ return *this;
+ }
+ _Self
+ operator++(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_next;
+ return __tmp;
+ }
+ _Self&
+ operator--()
+ {
+ _M_node = _M_node->_M_prev;
+ return *this;
+ }
+ _Self
+ operator--(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _M_node->_M_prev;
+ return __tmp;
+ }
+ bool
+ operator==(const _Self& __x) const
+ { return _M_node == __x._M_node; }
+ bool
+ operator!=(const _Self& __x) const
+ { return _M_node != __x._M_node; }
+ const _List_node_base* _M_node;
+ };
+ template<typename _Val>
+ inline bool
+ operator==(const _List_iterator<_Val>& __x,
+ const _List_const_iterator<_Val>& __y)
+ { return __x._M_node == __y._M_node; }
+ template<typename _Val>
+ inline bool
+ operator!=(const _List_iterator<_Val>& __x,
+ const _List_const_iterator<_Val>& __y)
+ { return __x._M_node != __y._M_node; }
+ template<typename _Tp, typename _Alloc>
+ class _List_base
+ {
+ protected:
+ typedef typename _Alloc::template rebind<_List_node<_Tp> >::other
+ _Node_alloc_type;
+ typedef typename _Alloc::template rebind<_Tp>::other _Tp_alloc_type;
+ struct _List_impl
+ : public _Node_alloc_type
+ {
+ _List_node_base _M_node;
+ _List_impl()
+ : _Node_alloc_type(), _M_node()
+ { }
+ _List_impl(const _Node_alloc_type& __a)
+ : _Node_alloc_type(__a), _M_node()
+ { }
+ };
+ _List_impl _M_impl;
+ _List_node<_Tp>*
+ _M_get_node()
+ { return _M_impl._Node_alloc_type::allocate(1); }
+ void
+ _M_put_node(_List_node<_Tp>* __p)
+ { _M_impl._Node_alloc_type::deallocate(__p, 1); }
+ public:
+ typedef _Alloc allocator_type;
+ _Node_alloc_type&
+ _M_get_Node_allocator()
+ { return *static_cast<_Node_alloc_type*>(&this->_M_impl); }
+ const _Node_alloc_type&
+ _M_get_Node_allocator() const
+ { return *static_cast<const _Node_alloc_type*>(&this->_M_impl); }
+ _Tp_alloc_type
+ _M_get_Tp_allocator() const
+ { return _Tp_alloc_type(_M_get_Node_allocator()); }
+ allocator_type
+ get_allocator() const
+ { return allocator_type(_M_get_Node_allocator()); }
+ _List_base()
+ : _M_impl()
+ { _M_init(); }
+ _List_base(const allocator_type& __a)
+ : _M_impl(__a)
+ { _M_init(); }
+ ~_List_base()
+ { _M_clear(); }
+ void
+ _M_clear();
+ void
+ _M_init()
+ {
+ this->_M_impl._M_node._M_next = &this->_M_impl._M_node;
+ this->_M_impl._M_node._M_prev = &this->_M_impl._M_node;
+ }
+ };
+ template<typename _Tp, typename _Alloc = std::allocator<_Tp> >
+ class list : protected _List_base<_Tp, _Alloc>
+ {
+ typedef typename _Alloc::value_type _Alloc_value_type;
+
+
+ typedef _List_base<_Tp, _Alloc> _Base;
+ typedef typename _Base::_Tp_alloc_type _Tp_alloc_type;
+ public:
+ typedef _Tp value_type;
+ typedef typename _Tp_alloc_type::pointer pointer;
+ typedef typename _Tp_alloc_type::const_pointer const_pointer;
+ typedef typename _Tp_alloc_type::reference reference;
+ typedef typename _Tp_alloc_type::const_reference const_reference;
+ typedef _List_iterator<_Tp> iterator;
+ typedef _List_const_iterator<_Tp> const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Alloc allocator_type;
+ protected:
+ typedef _List_node<_Tp> _Node;
+ using _Base::_M_impl;
+ using _Base::_M_put_node;
+ using _Base::_M_get_node;
+ using _Base::_M_get_Tp_allocator;
+ using _Base::_M_get_Node_allocator;
+ _Node*
+ _M_create_node(const value_type& __x)
+ {
+ _Node* __p = this->_M_get_node();
+ try
+ {
+ _M_get_Tp_allocator().construct(&__p->_M_data, __x);
+ }
+ catch(...)
+ {
+ _M_put_node(__p);
+ throw;
+ }
+ return __p;
+ }
+ public:
+ list()
+ : _Base() { }
+ explicit
+ list(const allocator_type& __a)
+ : _Base(__a) { }
+ explicit
+ list(size_type __n, const value_type& __value = value_type(),
+ const allocator_type& __a = allocator_type())
+ : _Base(__a)
+ { _M_fill_initialize(__n, __value); }
+ list(const list& __x)
+ : _Base(__x._M_get_Node_allocator())
+ { _M_initialize_dispatch(__x.begin(), __x.end(), __false_type()); }
+ template<typename _InputIterator>
+ list(_InputIterator __first, _InputIterator __last,
+ const allocator_type& __a = allocator_type())
+ : _Base(__a)
+ {
+ typedef typename std::__is_integer<_InputIterator>::__type _Integral;
+ _M_initialize_dispatch(__first, __last, _Integral());
+ }
+ list&
+ operator=(const list& __x);
+ void
+ assign(size_type __n, const value_type& __val)
+ { _M_fill_assign(__n, __val); }
+ template<typename _InputIterator>
+ void
+ assign(_InputIterator __first, _InputIterator __last)
+ {
+ typedef typename std::__is_integer<_InputIterator>::__type _Integral;
+ _M_assign_dispatch(__first, __last, _Integral());
+ }
+ allocator_type
+ get_allocator() const
+ { return _Base::get_allocator(); }
+ iterator
+ begin()
+ { return iterator(this->_M_impl._M_node._M_next); }
+ const_iterator
+ begin() const
+ { return const_iterator(this->_M_impl._M_node._M_next); }
+ iterator
+ end()
+ { return iterator(&this->_M_impl._M_node); }
+ const_iterator
+ end() const
+ { return const_iterator(&this->_M_impl._M_node); }
+ reverse_iterator
+ rbegin()
+ { return reverse_iterator(end()); }
+ const_reverse_iterator
+ rbegin() const
+ { return const_reverse_iterator(end()); }
+ reverse_iterator
+ rend()
+ { return reverse_iterator(begin()); }
+ const_reverse_iterator
+ rend() const
+ { return const_reverse_iterator(begin()); }
+ bool
+ empty() const
+ { return this->_M_impl._M_node._M_next == &this->_M_impl._M_node; }
+ size_type
+ size() const
+ { return std::distance(begin(), end()); }
+ size_type
+ max_size() const
+ { return _M_get_Node_allocator().max_size(); }
+ void
+ resize(size_type __new_size, value_type __x = value_type());
+ reference
+ front()
+ { return *begin(); }
+ const_reference
+ front() const
+ { return *begin(); }
+ reference
+ back()
+ {
+ iterator __tmp = end();
+ --__tmp;
+ return *__tmp;
+ }
+ const_reference
+ back() const
+ {
+ const_iterator __tmp = end();
+ --__tmp;
+ return *__tmp;
+ }
+ void
+ push_front(const value_type& __x)
+ { this->_M_insert(begin(), __x); }
+ void
+ pop_front()
+ { this->_M_erase(begin()); }
+ void
+ push_back(const value_type& __x)
+ { this->_M_insert(end(), __x); }
+ void
+ pop_back()
+ { this->_M_erase(iterator(this->_M_impl._M_node._M_prev)); }
+ iterator
+ insert(iterator __position, const value_type& __x);
+ void
+ insert(iterator __position, size_type __n, const value_type& __x)
+ {
+ list __tmp(__n, __x, _M_get_Node_allocator());
+ splice(__position, __tmp);
+ }
+ template<typename _InputIterator>
+ void
+ insert(iterator __position, _InputIterator __first,
+ _InputIterator __last)
+ {
+ list __tmp(__first, __last, _M_get_Node_allocator());
+ splice(__position, __tmp);
+ }
+ iterator
+ erase(iterator __position);
+ iterator
+ erase(iterator __first, iterator __last)
+ {
+ while (__first != __last)
+ __first = erase(__first);
+ return __last;
+ }
+ void
+ swap(list& __x)
+ {
+ _List_node_base::swap(this->_M_impl._M_node, __x._M_impl._M_node);
+ std::__alloc_swap<typename _Base::_Node_alloc_type>::
+ _S_do_it(_M_get_Node_allocator(), __x._M_get_Node_allocator());
+ }
+ void
+ clear()
+ {
+ _Base::_M_clear();
+ _Base::_M_init();
+ }
+ void
+ splice(iterator __position, list& __x)
+ {
+ if (!__x.empty())
+ {
+ _M_check_equal_allocators(__x);
+ this->_M_transfer(__position, __x.begin(), __x.end());
+ }
+ }
+ void
+ splice(iterator __position, list& __x, iterator __i)
+ {
+ iterator __j = __i;
+ ++__j;
+ if (__position == __i || __position == __j)
+ return;
+ if (this != &__x)
+ _M_check_equal_allocators(__x);
+ this->_M_transfer(__position, __i, __j);
+ }
+ void
+ splice(iterator __position, list& __x, iterator __first,
+ iterator __last)
+ {
+ if (__first != __last)
+ {
+ if (this != &__x)
+ _M_check_equal_allocators(__x);
+ this->_M_transfer(__position, __first, __last);
+ }
+ }
+ void
+ remove(const _Tp& __value);
+ template<typename _Predicate>
+ void
+ remove_if(_Predicate);
+ void
+ unique();
+ template<typename _BinaryPredicate>
+ void
+ unique(_BinaryPredicate);
+ void
+ merge(list& __x);
+ template<typename _StrictWeakOrdering>
+ void
+ merge(list&, _StrictWeakOrdering);
+ void
+ reverse()
+ { this->_M_impl._M_node.reverse(); }
+ void
+ sort();
+ template<typename _StrictWeakOrdering>
+ void
+ sort(_StrictWeakOrdering);
+ protected:
+ template<typename _Integer>
+ void
+ _M_initialize_dispatch(_Integer __n, _Integer __x, __true_type)
+ { _M_fill_initialize(static_cast<size_type>(__n), __x); }
+ template<typename _InputIterator>
+ void
+ _M_initialize_dispatch(_InputIterator __first, _InputIterator __last,
+ __false_type)
+ {
+ for (; __first != __last; ++__first)
+ push_back(*__first);
+ }
+ void
+ _M_fill_initialize(size_type __n, const value_type& __x)
+ {
+ for (; __n > 0; --__n)
+ push_back(__x);
+ }
+ template<typename _Integer>
+ void
+ _M_assign_dispatch(_Integer __n, _Integer __val, __true_type)
+ { _M_fill_assign(__n, __val); }
+ template<typename _InputIterator>
+ void
+ _M_assign_dispatch(_InputIterator __first, _InputIterator __last,
+ __false_type);
+ void
+ _M_fill_assign(size_type __n, const value_type& __val);
+ void
+ _M_transfer(iterator __position, iterator __first, iterator __last)
+ { __position._M_node->transfer(__first._M_node, __last._M_node); }
+ void
+ _M_insert(iterator __position, const value_type& __x)
+ {
+ _Node* __tmp = _M_create_node(__x);
+ __tmp->hook(__position._M_node);
+ }
+ void
+ _M_erase(iterator __position)
+ {
+ __position._M_node->unhook();
+ _Node* __n = static_cast<_Node*>(__position._M_node);
+ _M_get_Tp_allocator().destroy(&__n->_M_data);
+ _M_put_node(__n);
+ }
+ void
+ _M_check_equal_allocators(list& __x)
+ {
+ if (std::__alloc_neq<typename _Base::_Node_alloc_type>::
+ _S_do_it(_M_get_Node_allocator(), __x._M_get_Node_allocator()))
+ __throw_runtime_error(("list::_M_check_equal_allocators"));
+ }
+ };
+ template<typename _Tp, typename _Alloc>
+ inline bool
+ operator==(const list<_Tp, _Alloc>& __x, const list<_Tp, _Alloc>& __y)
+ {
+ typedef typename list<_Tp, _Alloc>::const_iterator const_iterator;
+ const_iterator __end1 = __x.end();
+ const_iterator __end2 = __y.end();
+ const_iterator __i1 = __x.begin();
+ const_iterator __i2 = __y.begin();
+ while (__i1 != __end1 && __i2 != __end2 && *__i1 == *__i2)
+ {
+ ++__i1;
+ ++__i2;
+ }
+ return __i1 == __end1 && __i2 == __end2;
+ }
+ template<typename _Tp, typename _Alloc>
+ inline bool
+ operator<(const list<_Tp, _Alloc>& __x, const list<_Tp, _Alloc>& __y)
+ { return std::lexicographical_compare(__x.begin(), __x.end(),
+ __y.begin(), __y.end()); }
+ template<typename _Tp, typename _Alloc>
+ inline bool
+ operator!=(const list<_Tp, _Alloc>& __x, const list<_Tp, _Alloc>& __y)
+ { return !(__x == __y); }
+ template<typename _Tp, typename _Alloc>
+ inline bool
+ operator>(const list<_Tp, _Alloc>& __x, const list<_Tp, _Alloc>& __y)
+ { return __y < __x; }
+ template<typename _Tp, typename _Alloc>
+ inline bool
+ operator<=(const list<_Tp, _Alloc>& __x, const list<_Tp, _Alloc>& __y)
+ { return !(__y < __x); }
+ template<typename _Tp, typename _Alloc>
+ inline bool
+ operator>=(const list<_Tp, _Alloc>& __x, const list<_Tp, _Alloc>& __y)
+ { return !(__x < __y); }
+ template<typename _Tp, typename _Alloc>
+ inline void
+ swap(list<_Tp, _Alloc>& __x, list<_Tp, _Alloc>& __y)
+ { __x.swap(__y); }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Tp, typename _Alloc>
+ void
+ _List_base<_Tp, _Alloc>::
+ _M_clear()
+ {
+ typedef _List_node<_Tp> _Node;
+ _Node* __cur = static_cast<_Node*>(this->_M_impl._M_node._M_next);
+ while (__cur != &this->_M_impl._M_node)
+ {
+ _Node* __tmp = __cur;
+ __cur = static_cast<_Node*>(__cur->_M_next);
+ _M_get_Tp_allocator().destroy(&__tmp->_M_data);
+ _M_put_node(__tmp);
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ typename list<_Tp, _Alloc>::iterator
+ list<_Tp, _Alloc>::
+ insert(iterator __position, const value_type& __x)
+ {
+ _Node* __tmp = _M_create_node(__x);
+ __tmp->hook(__position._M_node);
+ return iterator(__tmp);
+ }
+ template<typename _Tp, typename _Alloc>
+ typename list<_Tp, _Alloc>::iterator
+ list<_Tp, _Alloc>::
+ erase(iterator __position)
+ {
+ iterator __ret = iterator(__position._M_node->_M_next);
+ _M_erase(__position);
+ return __ret;
+ }
+ template<typename _Tp, typename _Alloc>
+ void
+ list<_Tp, _Alloc>::
+ resize(size_type __new_size, value_type __x)
+ {
+ iterator __i = begin();
+ size_type __len = 0;
+ for (; __i != end() && __len < __new_size; ++__i, ++__len)
+ ;
+ if (__len == __new_size)
+ erase(__i, end());
+ else
+ insert(end(), __new_size - __len, __x);
+ }
+ template<typename _Tp, typename _Alloc>
+ list<_Tp, _Alloc>&
+ list<_Tp, _Alloc>::
+ operator=(const list& __x)
+ {
+ if (this != &__x)
+ {
+ iterator __first1 = begin();
+ iterator __last1 = end();
+ const_iterator __first2 = __x.begin();
+ const_iterator __last2 = __x.end();
+ for (; __first1 != __last1 && __first2 != __last2;
+ ++__first1, ++__first2)
+ *__first1 = *__first2;
+ if (__first2 == __last2)
+ erase(__first1, __last1);
+ else
+ insert(__last1, __first2, __last2);
+ }
+ return *this;
+ }
+ template<typename _Tp, typename _Alloc>
+ void
+ list<_Tp, _Alloc>::
+ _M_fill_assign(size_type __n, const value_type& __val)
+ {
+ iterator __i = begin();
+ for (; __i != end() && __n > 0; ++__i, --__n)
+ *__i = __val;
+ if (__n > 0)
+ insert(end(), __n, __val);
+ else
+ erase(__i, end());
+ }
+ template<typename _Tp, typename _Alloc>
+ template <typename _InputIterator>
+ void
+ list<_Tp, _Alloc>::
+ _M_assign_dispatch(_InputIterator __first2, _InputIterator __last2,
+ __false_type)
+ {
+ iterator __first1 = begin();
+ iterator __last1 = end();
+ for (; __first1 != __last1 && __first2 != __last2;
+ ++__first1, ++__first2)
+ *__first1 = *__first2;
+ if (__first2 == __last2)
+ erase(__first1, __last1);
+ else
+ insert(__last1, __first2, __last2);
+ }
+ template<typename _Tp, typename _Alloc>
+ void
+ list<_Tp, _Alloc>::
+ remove(const value_type& __value)
+ {
+ iterator __first = begin();
+ iterator __last = end();
+ iterator __extra = __last;
+ while (__first != __last)
+ {
+ iterator __next = __first;
+ ++__next;
+ if (*__first == __value)
+ {
+ if (&*__first != &__value)
+ _M_erase(__first);
+ else
+ __extra = __first;
+ }
+ __first = __next;
+ }
+ if (__extra != __last)
+ _M_erase(__extra);
+ }
+ template<typename _Tp, typename _Alloc>
+ void
+ list<_Tp, _Alloc>::
+ unique()
+ {
+ iterator __first = begin();
+ iterator __last = end();
+ if (__first == __last)
+ return;
+ iterator __next = __first;
+ while (++__next != __last)
+ {
+ if (*__first == *__next)
+ _M_erase(__next);
+ else
+ __first = __next;
+ __next = __first;
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ void
+ list<_Tp, _Alloc>::
+ merge(list& __x)
+ {
+ if (this != &__x)
+ {
+ _M_check_equal_allocators(__x);
+ iterator __first1 = begin();
+ iterator __last1 = end();
+ iterator __first2 = __x.begin();
+ iterator __last2 = __x.end();
+ while (__first1 != __last1 && __first2 != __last2)
+ if (*__first2 < *__first1)
+ {
+ iterator __next = __first2;
+ _M_transfer(__first1, __first2, ++__next);
+ __first2 = __next;
+ }
+ else
+ ++__first1;
+ if (__first2 != __last2)
+ _M_transfer(__last1, __first2, __last2);
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ template <typename _StrictWeakOrdering>
+ void
+ list<_Tp, _Alloc>::
+ merge(list& __x, _StrictWeakOrdering __comp)
+ {
+ if (this != &__x)
+ {
+ _M_check_equal_allocators(__x);
+ iterator __first1 = begin();
+ iterator __last1 = end();
+ iterator __first2 = __x.begin();
+ iterator __last2 = __x.end();
+ while (__first1 != __last1 && __first2 != __last2)
+ if (__comp(*__first2, *__first1))
+ {
+ iterator __next = __first2;
+ _M_transfer(__first1, __first2, ++__next);
+ __first2 = __next;
+ }
+ else
+ ++__first1;
+ if (__first2 != __last2)
+ _M_transfer(__last1, __first2, __last2);
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ void
+ list<_Tp, _Alloc>::
+ sort()
+ {
+ if (this->_M_impl._M_node._M_next != &this->_M_impl._M_node
+ && this->_M_impl._M_node._M_next->_M_next != &this->_M_impl._M_node)
+ {
+ list __carry;
+ list __tmp[64];
+ list * __fill = &__tmp[0];
+ list * __counter;
+ do
+ {
+ __carry.splice(__carry.begin(), *this, begin());
+ for(__counter = &__tmp[0];
+ __counter != __fill && !__counter->empty();
+ ++__counter)
+ {
+ __counter->merge(__carry);
+ __carry.swap(*__counter);
+ }
+ __carry.swap(*__counter);
+ if (__counter == __fill)
+ ++__fill;
+ }
+ while ( !empty() );
+ for (__counter = &__tmp[1]; __counter != __fill; ++__counter)
+ __counter->merge(*(__counter - 1));
+ swap( *(__fill - 1) );
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ template <typename _Predicate>
+ void
+ list<_Tp, _Alloc>::
+ remove_if(_Predicate __pred)
+ {
+ iterator __first = begin();
+ iterator __last = end();
+ while (__first != __last)
+ {
+ iterator __next = __first;
+ ++__next;
+ if (__pred(*__first))
+ _M_erase(__first);
+ __first = __next;
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ template <typename _BinaryPredicate>
+ void
+ list<_Tp, _Alloc>::
+ unique(_BinaryPredicate __binary_pred)
+ {
+ iterator __first = begin();
+ iterator __last = end();
+ if (__first == __last)
+ return;
+ iterator __next = __first;
+ while (++__next != __last)
+ {
+ if (__binary_pred(*__first, *__next))
+ _M_erase(__next);
+ else
+ __first = __next;
+ __next = __first;
+ }
+ }
+ template<typename _Tp, typename _Alloc>
+ template <typename _StrictWeakOrdering>
+ void
+ list<_Tp, _Alloc>::
+ sort(_StrictWeakOrdering __comp)
+ {
+ if (this->_M_impl._M_node._M_next != &this->_M_impl._M_node
+ && this->_M_impl._M_node._M_next->_M_next != &this->_M_impl._M_node)
+ {
+ list __carry;
+ list __tmp[64];
+ list * __fill = &__tmp[0];
+ list * __counter;
+ do
+ {
+ __carry.splice(__carry.begin(), *this, begin());
+ for(__counter = &__tmp[0];
+ __counter != __fill && !__counter->empty();
+ ++__counter)
+ {
+ __counter->merge(__carry, __comp);
+ __carry.swap(*__counter);
+ }
+ __carry.swap(*__counter);
+ if (__counter == __fill)
+ ++__fill;
+ }
+ while ( !empty() );
+ for (__counter = &__tmp[1]; __counter != __fill; ++__counter)
+ __counter->merge(*(__counter - 1), __comp);
+ swap(*(__fill - 1));
+ }
+ }
+}
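+// Everything above is a reduced, preprocessed copy of the libstdc++ <list>
+// machinery this test needs; the actual regression test starts here.
+// foobarit() is only declared and carries no transaction_safe annotation,
+// so the compiler cannot assume it is safe to run inside a transaction.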
+extern void foobarit(void);
+class Game
+{
+public:
+ struct BuildProject
+ {
+ int posX;
+ };
+ std::list<BuildProject> buildProjects;
+};
+static Game game;
+static std::list<std::list<Game::BuildProject>::iterator>
+erasableBuildProjects;
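+// A __transaction_relaxed block may call code that is not transaction-safe
+// (such as foobarit()); at run time such a transaction typically has to fall
+// back to serial-irrevocable execution.  The erase() call pulls std::list
+// member functions into the transaction region.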
+void *buildProjectSyncStepConcurrently(int id, int localTeam)
+{
+ __transaction_relaxed {
+ std::list<std::list<Game::BuildProject>::iterator>::iterator it
+= erasableBuildProjects.begin();
+ foobarit();
+ game.buildProjects.erase( (std::list<Game::BuildProject>
+::iterator) *it);
+ }
+ return 0;
+}
+
diff --git a/gcc/testsuite/g++.dg/tm/pr46646.C b/gcc/testsuite/g++.dg/tm/pr46646.C
new file mode 100644
index 00000000000..9431615b0fb
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46646.C
@@ -0,0 +1,890 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O0"}
+
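+// Reduced testcase for PR 46646: a stripped-down copy of the libstdc++
+// allocator machinery in which selected declarations (e.g. __throw_bad_alloc,
+// new_allocator::allocate/deallocate) are marked
+// __attribute__((transaction_safe)).  It only needs to compile with -fgnu-tm.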
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<class _T1, class _T2>
+ struct pair
+ {
+ typedef _T1 first_type;
+ typedef _T2 second_type;
+ _T1 first;
+ _T2 second;
+ pair()
+ : first(), second() { }
+ pair(const _T1& __a, const _T2& __b)
+ : first(__a), second(__b) { }
+ };
+}
+
+
+typedef long int ptrdiff_t;
+typedef __SIZE_TYPE__ size_t;
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ using ::ptrdiff_t;
+ using ::size_t;
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ struct input_iterator_tag { };
+ struct output_iterator_tag { };
+ struct forward_iterator_tag : public input_iterator_tag { };
+ struct bidirectional_iterator_tag : public forward_iterator_tag { };
+ struct random_access_iterator_tag : public bidirectional_iterator_tag { };
+ template<typename _Category, typename _Tp, typename _Distance = ptrdiff_t,
+ typename _Pointer = _Tp*, typename _Reference = _Tp&>
+ struct iterator
+ {
+ typedef _Category iterator_category;
+ typedef _Tp value_type;
+ typedef _Distance difference_type;
+ typedef _Pointer pointer;
+ typedef _Reference reference;
+ };
+ template<typename _Iterator>
+ struct iterator_traits
+ {
+ typedef typename _Iterator::iterator_category iterator_category;
+ typedef typename _Iterator::value_type value_type;
+ typedef typename _Iterator::difference_type difference_type;
+ typedef typename _Iterator::pointer pointer;
+ typedef typename _Iterator::reference reference;
+ };
+ template<typename _Tp>
+ struct iterator_traits<_Tp*>
+ {
+ typedef random_access_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef _Tp& reference;
+ };
+ template<typename _Tp>
+ struct iterator_traits<const _Tp*>
+ {
+ typedef random_access_iterator_tag iterator_category;
+ typedef _Tp value_type;
+ typedef ptrdiff_t difference_type;
+ typedef const _Tp* pointer;
+ typedef const _Tp& reference;
+ };
+ template<typename _Iter>
+ inline typename iterator_traits<_Iter>::iterator_category
+ __iterator_category(const _Iter&)
+ { return typename iterator_traits<_Iter>::iterator_category(); }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Iterator>
+ class reverse_iterator
+ : public iterator<typename iterator_traits<_Iterator>::iterator_category,
+ typename iterator_traits<_Iterator>::value_type,
+ typename iterator_traits<_Iterator>::difference_type,
+ typename iterator_traits<_Iterator>::pointer,
+ typename iterator_traits<_Iterator>::reference>
+ {
+ protected:
+ _Iterator current;
+ typedef iterator_traits<_Iterator> __traits_type;
+ public:
+ typedef _Iterator iterator_type;
+ typedef typename __traits_type::difference_type difference_type;
+ typedef typename __traits_type::pointer pointer;
+ typedef typename __traits_type::reference reference;
+ reverse_iterator() : current() { }
+ explicit
+ reverse_iterator(iterator_type __x) : current(__x) { }
+ reverse_iterator(const reverse_iterator& __x)
+ : current(__x.current) { }
+ template<typename _Iter>
+ reverse_iterator(const reverse_iterator<_Iter>& __x)
+ : current(__x.base()) { }
+ iterator_type
+ base() const
+ { return current; }
+ reference
+ operator*() const
+ {
+ _Iterator __tmp = current;
+ return *--__tmp;
+ }
+ pointer
+ operator->() const
+ { return &(operator*()); }
+ reverse_iterator&
+ operator++()
+ {
+ --current;
+ return *this;
+ }
+ reverse_iterator
+ operator++(int)
+ {
+ reverse_iterator __tmp = *this;
+ --current;
+ return __tmp;
+ }
+ reverse_iterator&
+ operator--()
+ {
+ ++current;
+ return *this;
+ }
+ reverse_iterator
+ operator--(int)
+ {
+ reverse_iterator __tmp = *this;
+ ++current;
+ return __tmp;
+ }
+ reverse_iterator
+ operator+(difference_type __n) const
+ { return reverse_iterator(current - __n); }
+ reverse_iterator&
+ operator+=(difference_type __n)
+ {
+ current -= __n;
+ return *this;
+ }
+ reverse_iterator
+ operator-(difference_type __n) const
+ { return reverse_iterator(current + __n); }
+ reverse_iterator&
+ operator-=(difference_type __n)
+ {
+ current += __n;
+ return *this;
+ }
+ reference
+ operator[](difference_type __n) const
+ { return *(*this + __n); }
+ };
+}
+
+
+
+extern "C++" {
+namespace std
+{
+ class exception
+ {
+ public:
+ exception() throw() { }
+ virtual ~exception() throw();
+ virtual const char* what() const throw();
+ };
+ class bad_exception : public exception
+ {
+ public:
+ bad_exception() throw() { }
+ virtual ~bad_exception() throw();
+ virtual const char* what() const throw();
+ };
+ typedef void (*terminate_handler) ();
+ typedef void (*unexpected_handler) ();
+ terminate_handler set_terminate(terminate_handler) throw();
+ void terminate() throw() __attribute__ ((__noreturn__));
+ unexpected_handler set_unexpected(unexpected_handler) throw();
+ void unexpected() __attribute__ ((__noreturn__));
+ bool uncaught_exception() throw() __attribute__ ((__pure__));
+}
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ void __verbose_terminate_handler();
+}
+}
+extern "C++" {
+namespace std
+{
+ class bad_alloc : public exception
+ {
+ public:
+ bad_alloc() throw() { }
+ virtual ~bad_alloc() throw();
+ virtual const char* what() const throw();
+ };
+ struct nothrow_t { };
+ extern const nothrow_t nothrow;
+ typedef void (*new_handler)();
+ new_handler set_new_handler(new_handler) throw();
+}
+
+void* operator new(std::size_t, const std::nothrow_t&) throw();
+void* operator new[](std::size_t, const std::nothrow_t&) throw();
+void operator delete(void*, const std::nothrow_t&) throw();
+void operator delete[](void*, const std::nothrow_t&) throw();
+inline void* operator new(std::size_t, void* __p) throw() { return __p; }
+inline void* operator new[](std::size_t, void* __p) throw() { return __p; }
+inline void operator delete (void*, void*) throw() { }
+inline void operator delete[](void*, void*) throw() { }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ void
+ __throw_bad_exception(void) __attribute__((__noreturn__));
+ __attribute__((transaction_safe))
+ void
+ __throw_bad_alloc(void) __attribute__((__noreturn__));
+ void
+ __throw_bad_cast(void) __attribute__((__noreturn__));
+ void
+ __throw_bad_typeid(void) __attribute__((__noreturn__));
+ void
+ __throw_logic_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_domain_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_invalid_argument(const char*) __attribute__((__noreturn__));
+ void
+ __throw_length_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_out_of_range(const char*) __attribute__((__noreturn__));
+ void
+ __throw_runtime_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_range_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_overflow_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_underflow_error(const char*) __attribute__((__noreturn__));
+ void
+ __throw_ios_failure(const char*) __attribute__((__noreturn__));
+ void
+ __throw_system_error(int) __attribute__((__noreturn__));
+ void
+ __throw_future_error(int) __attribute__((__noreturn__));
+ void
+ __throw_bad_function_call() __attribute__((__noreturn__));
+}
+
+
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Tp>
+ inline void
+ swap(_Tp& __a, _Tp& __b)
+ {
+
+ _Tp __tmp = (__a);
+ __a = (__b);
+ __b = (__tmp);
+ }
+ template<typename _Tp, size_t _Nm>
+ inline void
+ swap(_Tp (&__a)[_Nm], _Tp (&__b)[_Nm])
+ {
+ for (size_t __n = 0; __n < _Nm; ++__n)
+ swap(__a[__n], __b[__n]);
+ }
+}
+namespace __gnu_cxx __attribute__ ((__visibility__ ("default"))) {
+ using std::size_t;
+ using std::ptrdiff_t;
+ template<typename _Tp>
+ class new_allocator
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+ template<typename _Tp1>
+ struct rebind
+ { typedef new_allocator<_Tp1> other; };
+ new_allocator() throw() { }
+ new_allocator(const new_allocator&) throw() { }
+ template<typename _Tp1>
+ new_allocator(const new_allocator<_Tp1>&) throw() { }
+ ~new_allocator() throw() { }
+ pointer
+ address(reference __x) const { return &__x; }
+ const_pointer
+ address(const_reference __x) const { return &__x; }
+ __attribute__((transaction_safe))
+ pointer
+ allocate(size_type __n, const void* = 0)
+ {
+ if (__n > this->max_size())
+ std::__throw_bad_alloc();
+ return static_cast<_Tp*>(::operator new(__n * sizeof(_Tp)));
+ }
+ __attribute__((transaction_safe))
+ void
+ deallocate(pointer __p, size_type)
+ { ::operator delete(__p); }
+ size_type
+ max_size() const throw()
+ { return size_t(-1) / sizeof(_Tp); }
+ void
+ construct(pointer __p, const _Tp& __val)
+ { ::new((void *)__p) _Tp(__val); }
+ void
+ destroy(pointer __p) { __p->~_Tp(); }
+ };
+ template<typename _Tp>
+ inline bool
+ operator==(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
+ { return true; }
+ template<typename _Tp>
+ inline bool
+ operator!=(const new_allocator<_Tp>&, const new_allocator<_Tp>&)
+ { return false; }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Tp>
+ class allocator;
+ template<>
+ class allocator<void>
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+ typedef void value_type;
+ template<typename _Tp1>
+ struct rebind
+ { typedef allocator<_Tp1> other; };
+ };
+ template<typename _Tp>
+ class allocator: public __gnu_cxx::new_allocator<_Tp>
+ {
+ public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Tp* pointer;
+ typedef const _Tp* const_pointer;
+ typedef _Tp& reference;
+ typedef const _Tp& const_reference;
+ typedef _Tp value_type;
+ template<typename _Tp1>
+ struct rebind
+ { typedef allocator<_Tp1> other; };
+ allocator() throw() { }
+ allocator(const allocator& __a) throw()
+ : __gnu_cxx::new_allocator<_Tp>(__a) { }
+ template<typename _Tp1>
+ allocator(const allocator<_Tp1>&) throw() { }
+ ~allocator() throw() { }
+ };
+ template<typename _T1, typename _T2>
+ inline bool
+ operator==(const allocator<_T1>&, const allocator<_T2>&)
+ { return true; }
+ template<typename _Tp>
+ inline bool
+ operator==(const allocator<_Tp>&, const allocator<_Tp>&)
+ { return true; }
+ template<typename _T1, typename _T2>
+ inline bool
+ operator!=(const allocator<_T1>&, const allocator<_T2>&)
+ { return false; }
+ template<typename _Tp>
+ inline bool
+ operator!=(const allocator<_Tp>&, const allocator<_Tp>&)
+ { return false; }
+ //extern template class allocator<char>;
+ // extern template class allocator<wchar_t>;
+ template<typename _Alloc, bool = __is_empty(_Alloc)>
+ struct __alloc_swap
+ { static void _S_do_it(_Alloc&, _Alloc&) { } };
+ template<typename _Alloc>
+ struct __alloc_swap<_Alloc, false>
+ {
+ static void
+ _S_do_it(_Alloc& __one, _Alloc& __two)
+ {
+ if (__one != __two)
+ swap(__one, __two);
+ }
+ };
+ template<typename _Alloc, bool = __is_empty(_Alloc)>
+ struct __alloc_neq
+ {
+ static bool
+ _S_do_it(const _Alloc&, const _Alloc&)
+ { return false; }
+ };
+ template<typename _Alloc>
+ struct __alloc_neq<_Alloc, false>
+ {
+ static bool
+ _S_do_it(const _Alloc& __one, const _Alloc& __two)
+ { return __one != __two; }
+ };
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Arg, typename _Result>
+ struct unary_function
+ {
+ typedef _Arg argument_type;
+ typedef _Result result_type;
+ };
+ template<typename _Arg1, typename _Arg2, typename _Result>
+ struct binary_function
+ {
+ typedef _Arg1 first_argument_type;
+ typedef _Arg2 second_argument_type;
+ typedef _Result result_type;
+ };
+ template<typename _Tp>
+ struct equal_to : public binary_function<_Tp, _Tp, bool>
+ {
+ bool
+ operator()(const _Tp& __x, const _Tp& __y) const
+ { return __x == __y; }
+ };
+ template<typename _Tp>
+ struct not_equal_to : public binary_function<_Tp, _Tp, bool>
+ {
+ bool
+ operator()(const _Tp& __x, const _Tp& __y) const
+ { return __x != __y; }
+ };
+ template<typename _Tp>
+ struct greater : public binary_function<_Tp, _Tp, bool>
+ {
+ bool
+ operator()(const _Tp& __x, const _Tp& __y) const
+ { return __x > __y; }
+ };
+ template<typename _Tp>
+ struct less : public binary_function<_Tp, _Tp, bool>
+ {
+ bool
+ operator()(const _Tp& __x, const _Tp& __y) const
+ { return __x < __y; }
+ };
+ template<typename _Tp>
+ struct _Identity : public unary_function<_Tp,_Tp>
+ {
+ _Tp&
+ operator()(_Tp& __x) const
+ { return __x; }
+ const _Tp&
+ operator()(const _Tp& __x) const
+ { return __x; }
+ };
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ enum _Rb_tree_color { _S_red = false, _S_black = true };
+ struct _Rb_tree_node_base
+ {
+ typedef _Rb_tree_node_base* _Base_ptr;
+ typedef const _Rb_tree_node_base* _Const_Base_ptr;
+ _Rb_tree_color _M_color;
+ _Base_ptr _M_parent;
+ _Base_ptr _M_left;
+ _Base_ptr _M_right;
+ static _Base_ptr
+ _S_minimum(_Base_ptr __x)
+ {
+ while (__x->_M_left != 0) __x = __x->_M_left;
+ return __x;
+ }
+ static _Const_Base_ptr
+ _S_minimum(_Const_Base_ptr __x)
+ {
+ while (__x->_M_left != 0) __x = __x->_M_left;
+ return __x;
+ }
+ static _Base_ptr
+ _S_maximum(_Base_ptr __x)
+ {
+ while (__x->_M_right != 0) __x = __x->_M_right;
+ return __x;
+ }
+ static _Const_Base_ptr
+ _S_maximum(_Const_Base_ptr __x)
+ {
+ while (__x->_M_right != 0) __x = __x->_M_right;
+ return __x;
+ }
+ };
+ template<typename _Val>
+ struct _Rb_tree_node : public _Rb_tree_node_base
+ {
+ typedef _Rb_tree_node<_Val>* _Link_type;
+ _Val _M_value_field;
+ };
+ __attribute__ ((__pure__)) _Rb_tree_node_base*
+ _Rb_tree_increment(_Rb_tree_node_base* __x) throw ();
+ __attribute__ ((__pure__)) const _Rb_tree_node_base*
+ _Rb_tree_increment(const _Rb_tree_node_base* __x) throw ();
+ __attribute__ ((__pure__)) _Rb_tree_node_base*
+ _Rb_tree_decrement(_Rb_tree_node_base* __x) throw ();
+ __attribute__ ((__pure__)) const _Rb_tree_node_base*
+ _Rb_tree_decrement(const _Rb_tree_node_base* __x) throw ();
+ template<typename _Tp>
+ struct _Rb_tree_iterator
+ {
+ typedef _Tp value_type;
+ typedef _Tp& reference;
+ typedef _Tp* pointer;
+ typedef bidirectional_iterator_tag iterator_category;
+ typedef ptrdiff_t difference_type;
+ typedef _Rb_tree_iterator<_Tp> _Self;
+ typedef _Rb_tree_node_base::_Base_ptr _Base_ptr;
+ typedef _Rb_tree_node<_Tp>* _Link_type;
+ _Rb_tree_iterator()
+ : _M_node() { }
+ explicit
+ _Rb_tree_iterator(_Link_type __x)
+ : _M_node(__x) { }
+ reference
+ operator*() const
+ { return static_cast<_Link_type>(_M_node)->_M_value_field; }
+ pointer
+ operator->() const
+ { return &static_cast<_Link_type>(_M_node)->_M_value_field; }
+ _Self&
+ operator++()
+ {
+ _M_node = _Rb_tree_increment(_M_node);
+ return *this;
+ }
+ _Self
+ operator++(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _Rb_tree_increment(_M_node);
+ return __tmp;
+ }
+ _Self&
+ operator--()
+ {
+ _M_node = _Rb_tree_decrement(_M_node);
+ return *this;
+ }
+ _Self
+ operator--(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _Rb_tree_decrement(_M_node);
+ return __tmp;
+ }
+ bool
+ operator==(const _Self& __x) const
+ { return _M_node == __x._M_node; }
+ bool
+ operator!=(const _Self& __x) const
+ { return _M_node != __x._M_node; }
+ _Base_ptr _M_node;
+ };
+ template<typename _Tp>
+ struct _Rb_tree_const_iterator
+ {
+ typedef _Tp value_type;
+ typedef const _Tp& reference;
+ typedef const _Tp* pointer;
+ typedef _Rb_tree_iterator<_Tp> iterator;
+ typedef bidirectional_iterator_tag iterator_category;
+ typedef ptrdiff_t difference_type;
+ typedef _Rb_tree_const_iterator<_Tp> _Self;
+ typedef _Rb_tree_node_base::_Const_Base_ptr _Base_ptr;
+ typedef const _Rb_tree_node<_Tp>* _Link_type;
+ _Rb_tree_const_iterator()
+ : _M_node() { }
+ explicit
+ _Rb_tree_const_iterator(_Link_type __x)
+ : _M_node(__x) { }
+ _Rb_tree_const_iterator(const iterator& __it)
+ : _M_node(__it._M_node) { }
+ reference
+ operator*() const
+ { return static_cast<_Link_type>(_M_node)->_M_value_field; }
+ pointer
+ operator->() const
+ { return &static_cast<_Link_type>(_M_node)->_M_value_field; }
+ _Self&
+ operator++()
+ {
+ _M_node = _Rb_tree_increment(_M_node);
+ return *this;
+ }
+ _Self
+ operator++(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _Rb_tree_increment(_M_node);
+ return __tmp;
+ }
+ _Self&
+ operator--()
+ {
+ _M_node = _Rb_tree_decrement(_M_node);
+ return *this;
+ }
+ _Self
+ operator--(int)
+ {
+ _Self __tmp = *this;
+ _M_node = _Rb_tree_decrement(_M_node);
+ return __tmp;
+ }
+ bool
+ operator==(const _Self& __x) const
+ { return _M_node == __x._M_node; }
+ bool
+ operator!=(const _Self& __x) const
+ { return _M_node != __x._M_node; }
+ _Base_ptr _M_node;
+ };
+ void
+ _Rb_tree_insert_and_rebalance(const bool __insert_left,
+ _Rb_tree_node_base* __x,
+ _Rb_tree_node_base* __p,
+ _Rb_tree_node_base& __header) throw ();
+ _Rb_tree_node_base*
+ _Rb_tree_rebalance_for_erase(_Rb_tree_node_base* const __z,
+ _Rb_tree_node_base& __header) throw ();
+ template<typename _Key, typename _Val, typename _KeyOfValue,
+ typename _Compare, typename _Alloc = allocator<_Val> >
+ class _Rb_tree
+ {
+ typedef typename _Alloc::template rebind<_Rb_tree_node<_Val> >::other
+ _Node_allocator;
+ protected:
+ typedef _Rb_tree_node_base* _Base_ptr;
+ typedef const _Rb_tree_node_base* _Const_Base_ptr;
+ public:
+ typedef _Key key_type;
+ typedef _Val value_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+ typedef _Rb_tree_node<_Val>* _Link_type;
+ typedef const _Rb_tree_node<_Val>* _Const_Link_type;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef _Alloc allocator_type;
+ _Node_allocator&
+ _M_get_Node_allocator()
+ { return *static_cast<_Node_allocator*>(&this->_M_impl); }
+ const _Node_allocator&
+ _M_get_Node_allocator() const
+ { return *static_cast<const _Node_allocator*>(&this->_M_impl); }
+ allocator_type
+ get_allocator() const
+ { return allocator_type(_M_get_Node_allocator()); }
+ protected:
+ _Link_type
+ _M_get_node()
+ { return _M_impl._Node_allocator::allocate(1); }
+ __attribute__((transaction_safe))
+ void
+ _M_put_node(_Link_type __p)
+ { _M_impl._Node_allocator::deallocate(__p, 1); }
+ __attribute__((transaction_safe))
+ _Link_type
+ _M_create_node(const value_type& __x)
+ {
+ _Link_type __tmp = _M_get_node();
+ try
+ { get_allocator().construct(&__tmp->_M_value_field, __x); }
+ catch(...)
+ {
+ _M_put_node(__tmp);
+ throw;
+ }
+ return __tmp;
+ }
+ void
+ _M_destroy_node(_Link_type __p)
+ {
+ get_allocator().destroy(&__p->_M_value_field);
+ _M_put_node(__p);
+ }
+ protected:
+ template<typename _Key_compare,
+ bool _Is_pod_comparator = __is_pod(_Key_compare)>
+ struct _Rb_tree_impl : public _Node_allocator
+ {
+ _Key_compare _M_key_compare;
+ _Rb_tree_node_base _M_header;
+ size_type _M_node_count;
+ _Rb_tree_impl()
+ : _Node_allocator(), _M_key_compare(), _M_header(),
+ _M_node_count(0)
+ { _M_initialize(); }
+ _Rb_tree_impl(const _Key_compare& __comp, const _Node_allocator& __a)
+ : _Node_allocator(__a), _M_key_compare(__comp), _M_header(),
+ _M_node_count(0)
+ { _M_initialize(); }
+ private:
+ void
+ _M_initialize()
+ {
+ this->_M_header._M_color = _S_red;
+ this->_M_header._M_parent = 0;
+ this->_M_header._M_left = &this->_M_header;
+ this->_M_header._M_right = &this->_M_header;
+ }
+ };
+ _Rb_tree_impl<_Compare> _M_impl;
+ protected:
+ _Base_ptr&
+ _M_root()
+ { return this->_M_impl._M_header._M_parent; }
+ _Const_Base_ptr
+ _M_root() const
+ { return this->_M_impl._M_header._M_parent; }
+ _Base_ptr&
+ _M_leftmost()
+ { return this->_M_impl._M_header._M_left; }
+ _Const_Base_ptr
+ _M_leftmost() const
+ { return this->_M_impl._M_header._M_left; }
+ _Base_ptr&
+ _M_rightmost()
+ { return this->_M_impl._M_header._M_right; }
+ _Const_Base_ptr
+ _M_rightmost() const
+ { return this->_M_impl._M_header._M_right; }
+ _Link_type
+ _M_begin()
+ { return static_cast<_Link_type>(this->_M_impl._M_header._M_parent); }
+ _Const_Link_type
+ _M_begin() const
+ {
+ return static_cast<_Const_Link_type>
+ (this->_M_impl._M_header._M_parent);
+ }
+ _Link_type
+ _M_end()
+ { return static_cast<_Link_type>(&this->_M_impl._M_header); }
+ _Const_Link_type
+ _M_end() const
+ { return static_cast<_Const_Link_type>(&this->_M_impl._M_header); }
+ static const_reference
+ _S_value(_Const_Link_type __x)
+ { return __x->_M_value_field; }
+ static const _Key&
+ _S_key(_Const_Link_type __x)
+ { return _KeyOfValue()(_S_value(__x)); }
+ static _Link_type
+ _S_left(_Base_ptr __x)
+ { return static_cast<_Link_type>(__x->_M_left); }
+ static _Const_Link_type
+ _S_left(_Const_Base_ptr __x)
+ { return static_cast<_Const_Link_type>(__x->_M_left); }
+ static _Link_type
+ _S_right(_Base_ptr __x)
+ { return static_cast<_Link_type>(__x->_M_right); }
+ static _Const_Link_type
+ _S_right(_Const_Base_ptr __x)
+ { return static_cast<_Const_Link_type>(__x->_M_right); }
+ static const_reference
+ _S_value(_Const_Base_ptr __x)
+ { return static_cast<_Const_Link_type>(__x)->_M_value_field; }
+ static const _Key&
+ _S_key(_Const_Base_ptr __x)
+ { return _KeyOfValue()(_S_value(__x)); }
+ public:
+ typedef _Rb_tree_iterator<value_type> iterator;
+ typedef _Rb_tree_const_iterator<value_type> const_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ private:
+ iterator
+ _M_insert_(_Const_Base_ptr __x, _Const_Base_ptr __y,
+ const value_type& __v);
+ public:
+ _Rb_tree() { }
+ iterator
+ begin()
+ {
+ return iterator(static_cast<_Link_type>
+ (this->_M_impl._M_header._M_left));
+ }
+ const_iterator
+ begin() const
+ {
+ return const_iterator(static_cast<_Const_Link_type>
+ (this->_M_impl._M_header._M_left));
+ }
+ iterator
+ end()
+ { return iterator(static_cast<_Link_type>(&this->_M_impl._M_header)); }
+ const_iterator
+ end() const
+ {
+ return const_iterator(static_cast<_Const_Link_type>
+ (&this->_M_impl._M_header));
+ }
+ pair<iterator, bool>
+ _M_insert_unique(const value_type& __x);
+ };
+ template<typename _Key, typename _Val, typename _KeyOfValue,
+ typename _Compare, typename _Alloc>
+ typename _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::iterator
+ _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::
+ _M_insert_(_Const_Base_ptr __x, _Const_Base_ptr __p, const _Val& __v)
+ {
+ _Link_type __z = _M_create_node(__v);
+ return iterator(__z);
+ }
+ template<typename _Key, typename _Val, typename _KeyOfValue,
+ typename _Compare, typename _Alloc>
+ pair<typename _Rb_tree<_Key, _Val, _KeyOfValue,
+ _Compare, _Alloc>::iterator, bool>
+ _Rb_tree<_Key, _Val, _KeyOfValue, _Compare, _Alloc>::
+ _M_insert_unique(const _Val& __v)
+ {
+ _Link_type __x = _M_begin();
+ _Link_type __y = _M_end();
+ iterator __j = iterator(__y);
+ return pair<iterator, bool>(_M_insert_(__x, __y, __v), true);
+ }
+}
+namespace std __attribute__ ((__visibility__ ("default"))) {
+ template<typename _Key, typename _Compare = std::less<_Key>,
+ typename _Alloc = std::allocator<_Key> >
+ class set
+ {
+ public:
+ typedef _Key key_type;
+ typedef _Key value_type;
+ typedef _Compare key_compare;
+ typedef _Compare value_compare;
+ typedef _Alloc allocator_type;
+ private:
+ typedef typename _Alloc::template rebind<_Key>::other _Key_alloc_type;
+ typedef _Rb_tree<key_type, value_type, _Identity<value_type>,
+ key_compare, _Key_alloc_type> _Rep_type;
+ _Rep_type _M_t;
+ public:
+ typedef typename _Key_alloc_type::pointer pointer;
+ typedef typename _Key_alloc_type::const_pointer const_pointer;
+ typedef typename _Key_alloc_type::reference reference;
+ typedef typename _Key_alloc_type::const_reference const_reference;
+ typedef typename _Rep_type::const_iterator iterator;
+ typedef typename _Rep_type::const_iterator const_iterator;
+ typedef typename _Rep_type::const_reverse_iterator reverse_iterator;
+ typedef typename _Rep_type::const_reverse_iterator const_reverse_iterator;
+ typedef typename _Rep_type::size_type size_type;
+ typedef typename _Rep_type::difference_type difference_type;
+ std::pair<iterator, bool>
+ insert(const value_type& __x)
+ {
+ _M_t._M_insert_unique(__x);
+ }
+ };
+}
+__attribute__((transaction_pure))
+void* operator new(size_t);
+__attribute__((transaction_pure))
+void operator delete(void*);
+class Widget
+{
+private:
+};
+class Screen
+{
+protected:
+  std::set<Widget *> widgets;
+public:
+  void addWidget(Widget* widget);
+};
+void Screen::addWidget(Widget* widget)
+{
+  widgets.insert(widget);
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr46653.C b/gcc/testsuite/g++.dg/tm/pr46653.C
new file mode 100644
index 00000000000..f8f3a1d3117
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46653.C
@@ -0,0 +1,18 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O" }
+
+class shared_count
+{
+public:
+ volatile int j;
+ shared_count() : j(0) { }
+};
+
+shared_count * c;
+int main()
+{
+ __transaction_atomic {
+ shared_count sc;
+ }
+ return 0;
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr46714.C b/gcc/testsuite/g++.dg/tm/pr46714.C
new file mode 100644
index 00000000000..130b58cdd99
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46714.C
@@ -0,0 +1,14 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O" }
+
+static int asdf __attribute__ ((__weakref__("funky")));
+
+class Building
+{
+public:
+ __attribute__((transaction_safe)) ~Building(void);
+};
+
+Building::~Building()
+{
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr46941.C b/gcc/testsuite/g++.dg/tm/pr46941.C
new file mode 100644
index 00000000000..eac54383138
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr46941.C
@@ -0,0 +1,37 @@
+// { dg-do "compile" }
+// { dg-options "-fgnu-tm" }
+
+class Obj
+{
+ int dummy;
+};
+
+__attribute__((transaction_safe))
+Obj* allocate()
+{
+ return new Obj;
+}
+
+__attribute__((transaction_safe))
+void deallocate(Obj * o)
+{
+ delete o;
+}
+
+__attribute__((transaction_safe))
+Obj* allocatearray()
+{
+ return new Obj[2];
+}
+
+__attribute__((transaction_safe))
+void deallocatearray(Obj *o[])
+{
+ delete [] o;
+}
+
+/* The delete/new operators are handled by the libitm runtime. */
+/* { dg-final { scan-assembler "_ZGTtnw\[mj\]" } } */
+/* { dg-final { scan-assembler "_ZGTtna\[mj\]" } } */
+/* { dg-final { scan-assembler "_ZGTtdlPv" } } */
+/* { dg-final { scan-assembler "_ZGTtdaPv" } } */
diff --git a/gcc/testsuite/g++.dg/tm/pr47340.C b/gcc/testsuite/g++.dg/tm/pr47340.C
new file mode 100644
index 00000000000..ead3361fae2
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr47340.C
@@ -0,0 +1,11 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+void* operator new(__SIZE_TYPE__) throw (int);
+
+void *point;
+
+void funky()
+{
+ point = new (int);
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr47530.C b/gcc/testsuite/g++.dg/tm/pr47530.C
new file mode 100644
index 00000000000..8e7e27297ce
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr47530.C
@@ -0,0 +1,79 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O2 -fdump-tree-optimized" }
+
+typedef __SIZE_TYPE__ size_t;
+extern void *malloc(size_t);
+
+namespace bench
+{
+ class LLNode
+ {
+ LLNode* next;
+ int data;
+
+ public:
+ __attribute__((transaction_safe))
+ LLNode(int val, LLNode* m_next)
+ {
+ data = val;
+ next = m_next;
+ }
+ __attribute__((transaction_safe))
+ ~LLNode(){}
+ __attribute__((transaction_safe))
+ int get_val() {return data;}
+ __attribute__((transaction_safe))
+ LLNode* get_next() {return next;}
+ __attribute__((transaction_safe))
+ void set_val(int val) {data = val;}
+ __attribute__((transaction_safe))
+ void set_next(LLNode* n) {next = n;}
+ __attribute__((transaction_safe))
+ void *operator new(size_t size);
+ };
+
+ class LinkedList
+ {
+ LLNode* head;
+ public:
+ LinkedList();
+ void insert(int val);
+ };
+}
+
+using bench::LinkedList;
+using bench::LLNode;
+
+
+__attribute__((transaction_safe))
+void* LLNode::operator new(size_t size)
+{
+ return malloc(size);
+}
+
+LinkedList::LinkedList() : head(new LLNode(-1, 0)) { }
+
+void LinkedList::insert(int val)
+{
+ __transaction_atomic {
+ LLNode* prev = head;
+ LLNode* curr = head->get_next();
+
+ while (curr != 0) {
+ if (curr->get_val() >= val)
+ break;
+ prev = curr;
+ curr = prev->get_next();
+ }
+
+ if (!curr || (curr->get_val() > val)){
+ LLNode* tmp = new LLNode(val,curr);
+ prev->set_next(tmp);
+ }
+ }
+}
+
+// Make sure we don't do tail-call optimization on the commit.
+// { dg-final { scan-tree-dump-times "commitTransaction...; .tail call" 0 "optimized" } }
+// { dg-final { cleanup-tree-dump "optimized" } }
+
diff --git a/gcc/testsuite/g++.dg/tm/pr47554.C b/gcc/testsuite/g++.dg/tm/pr47554.C
new file mode 100644
index 00000000000..28841bb15ad
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr47554.C
@@ -0,0 +1,27 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+class list
+{
+ public: list()
+ {
+ }
+ list(const list&)
+ {
+ }
+ const list& _M_get_Node_allocator() const
+ {
+ }
+ list _M_get_Tp_allocator() const
+ {
+ return list(_M_get_Node_allocator());
+ }
+};
+static list buildProjects;
+static void build()
+{
+ __transaction_relaxed
+ {
+ buildProjects._M_get_Tp_allocator();
+ }
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr47573.C b/gcc/testsuite/g++.dg/tm/pr47573.C
new file mode 100644
index 00000000000..239d9222e8e
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr47573.C
@@ -0,0 +1,25 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+template<typename _Tp> class allocator
+{
+ public:
+ allocator() { }
+};
+extern template class allocator<char>;
+
+template<typename _Alloc = allocator<char> > class basic_string
+{
+ public:
+ _Alloc _M_dataplus;
+
+ __attribute__((transaction_safe))
+ basic_string() : _M_dataplus(_Alloc())
+ {
+ }
+};
+
+int getStringHeight()
+{
+ basic_string<> tmp;
+}
diff --git a/gcc/testsuite/g++.dg/tm/pr47746.C b/gcc/testsuite/g++.dg/tm/pr47746.C
new file mode 100644
index 00000000000..7cd9e1006d1
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/pr47746.C
@@ -0,0 +1,27 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm" }
+
+class InputStream
+{
+ public:
+ virtual unsigned int readUint32 () = 0;
+};
+
+class Building
+{
+ public:
+ __attribute__((transaction_safe)) Building (InputStream *stream);
+ __attribute__((transaction_safe)) void freeGradients ();
+ void load (InputStream *stream);
+};
+
+Building::Building (InputStream *stream)
+{
+ load(stream);
+}
+
+void Building::load (InputStream *stream)
+{
+ int j = (int)stream->readUint32 ();
+ freeGradients ();
+}
diff --git a/gcc/testsuite/g++.dg/tm/template-1.C b/gcc/testsuite/g++.dg/tm/template-1.C
new file mode 100644
index 00000000000..b93828a1dc3
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/template-1.C
@@ -0,0 +1,35 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O -fdump-tree-tmmark" }
+
+struct TrueFalse
+{
+ static bool v() { return true; }
+};
+
+int global;
+
+template<typename T> int foo()
+{
+ __transaction_atomic { global += 2; }
+ return __transaction_atomic (global + 1);
+}
+
+template<typename T> int bar() __transaction_atomic
+{
+ return global + 3;
+}
+
+template<typename T> void bar2() __transaction_atomic
+{
+ global += 4;
+}
+
+int f1()
+{
+ bar2<TrueFalse>();
+ return foo<TrueFalse>() + bar<TrueFalse>();
+}
+
+/* 4 transactions overall, two of which write to global: */
+/* { dg-final { scan-tree-dump-times "ITM_RU4\\s*\\(&global" 4 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "ITM_WU4\\s*\\(&global" 2 "tmmark" } } */
diff --git a/gcc/testsuite/g++.dg/tm/tm.exp b/gcc/testsuite/g++.dg/tm/tm.exp
new file mode 100644
index 00000000000..d8c76fe1e88
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/tm.exp
@@ -0,0 +1,39 @@
+# Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib g++-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CXXFLAGS
+if ![info exists DEFAULT_CXXFLAGS] then {
+ set DEFAULT_CXXFLAGS " -ansi -pedantic-errors -Wno-long-long"
+}
+
+# Initialize `dg'.
+dg-init
+
+# Run the tests that are shared with C.
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/tm/*.c]] \
+ "" $DEFAULT_CXXFLAGS
+# Run the C++ only tests.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.C]] \
+ "" $DEFAULT_CXXFLAGS
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/g++.dg/tm/vector-1.C b/gcc/testsuite/g++.dg/tm/vector-1.C
new file mode 100644
index 00000000000..2c5bb39f7ed
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/vector-1.C
@@ -0,0 +1,15 @@
+// { dg-do compile }
+// { dg-options "-fgnu-tm -O3" }
+
+class HashTree
+{
+ __attribute__((transaction_safe)) void rehash();
+ HashTree **Hash_table;
+ int Hash_function;
+};
+
+__attribute__((transaction_safe)) void HashTree::rehash()
+{
+ for (int i=0; i < Hash_function; i++)
+ Hash_table[i] = 0;
+}
diff --git a/gcc/testsuite/g++.dg/tm/wrap-2.C b/gcc/testsuite/g++.dg/tm/wrap-2.C
new file mode 100644
index 00000000000..564fbf87e1b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tm/wrap-2.C
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+#define W(X) __attribute__((transaction_wrap(X)))
+void f1(void);
+void f2(int);
+int i3;
+int f7(void);
+
+void g1(void) W(f1);
+void g2(void) W(f2); /* { dg-error "is not compatible" } */
+void g3(void) W(i3); /* { dg-error "is not a function" } */
+void g4(void) W(f4); /* { dg-error "not declared in this scope\|not an identifier" } */
+void g5(void) W(1); /* { dg-error "not an identifier" } */
+void g6(void) W("f1"); /* { dg-error "not an identifier" } */
+void g7(void) W(f7); /* { dg-error "is not compatible" } */
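
The diagnostics exercised above follow from the rule that a transaction_wrap argument must name a function whose signature matches the wrapper's. As a hedged illustration only (invented identifiers, not part of this patch), a well-formed wrapper pair looks like this:

/* Hypothetical example: txn_hook wraps hook; both are void(int),
   so no "is not compatible" diagnostic is issued.  */
void hook (int);
void txn_hook (int) __attribute__ ((transaction_wrap (hook)));

void caller (int v)
{
  __transaction_relaxed {
    hook (v);   /* inside a transaction the wrapper txn_hook is used */
  }
}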
diff --git a/gcc/testsuite/gcc.dg/tm/20091013.c b/gcc/testsuite/gcc.dg/tm/20091013.c
new file mode 100644
index 00000000000..d9b3b0ed3bc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20091013.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O2" } */
+
+extern long ringo(long int);
+int g,i;
+
+f()
+{
+ __transaction_relaxed {
+ for (i=0; i < 10; ++i)
+ ringo(g);
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20091221.c b/gcc/testsuite/gcc.dg/tm/20091221.c
new file mode 100644
index 00000000000..1d75d153a2c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20091221.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmedge" } */
+
+int i;
+extern void virgin () __attribute__((transaction_pure));
+
+foo()
+{
+ __transaction_atomic {
+ virgin(i);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "readOnly" 1 "tmedge" } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/20100125.c b/gcc/testsuite/gcc.dg/tm/20100125.c
new file mode 100644
index 00000000000..3f1dd10fea2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100125.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+/* Test that the call to george() doesn't end up inside the transaction. */
+
+int trxn;
+
+void set_remove(int * val)
+{
+ __transaction_atomic {
+ trxn = 5;
+ }
+ george();
+}
+
+/* { dg-final { scan-tree-dump-times "getTMCloneOrIrrevocable" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/20100519.c b/gcc/testsuite/gcc.dg/tm/20100519.c
new file mode 100644
index 00000000000..009b7901a39
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100519.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+typedef struct coordinate {
+ double x;
+} coordinate_t;
+
+coordinate_t elementPtrC[3];
+
+__attribute__((transaction_safe))
+void TMelement_alloc (coordinate_t* coordinates, int numCoordinate)
+{
+ int i;
+ for (i = 0; i < numCoordinate; i++) {
+ elementPtrC[i] = coordinates[i];
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20100524-2.c b/gcc/testsuite/gcc.dg/tm/20100524-2.c
new file mode 100644
index 00000000000..a3578cb2797
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100524-2.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O0" } */
+
+typedef struct {
+ int value[5];
+} type_t;
+
+__attribute__((transaction_safe))
+type_t func_move ();
+
+__attribute__((transaction_safe))
+type_t func_push (int type)
+{
+ type_t trace;
+
+ if (type == 9)
+ trace = func_move();
+
+ return trace;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20100603.c b/gcc/testsuite/gcc.dg/tm/20100603.c
new file mode 100644
index 00000000000..3061063394d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100603.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-optimized" } */
+
+int jj;
+
+__attribute__((transaction_safe))
+static void poof ()
+{
+ if (jj)
+ return;
+ poof();
+}
+
+__attribute__((transaction_safe))
+void TMlist_free ()
+{
+ poof();
+}
+
+/* { dg-final { scan-tree-dump-times "Function poof ._ZGTt4poof" 1 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/20100609.c b/gcc/testsuite/gcc.dg/tm/20100609.c
new file mode 100644
index 00000000000..760f81ec6dd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100609.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+extern void funcNoReturn() __attribute__ ((__noreturn__));
+
+int later;
+
+void MyFunc()
+{
+ __transaction_relaxed {
+ funcNoReturn();
+ later=8;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20100610.c b/gcc/testsuite/gcc.dg/tm/20100610.c
new file mode 100644
index 00000000000..0985b9ebc23
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100610.c
@@ -0,0 +1,90 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O3" } */
+
+/* The function calculateCircumCircle() should get inlined into the TM
+ clone for TMelement_alloc(), so we don't need to generate a TM
+ clone for calculateCircumCircle(). We also don't need to put its
+ entry into the clone table since it's static. */
+
+/* { dg-final { scan-assembler-not "ZGTt21calculateCircumCircle" } } */
+
+extern double sqrt(double) __attribute__((transaction_pure));
+extern void *xmalloc(int) __attribute__((transaction_safe));
+
+typedef struct coordinate {
+ double x;
+ double y;
+} coordinate_t;
+typedef struct element {
+ coordinate_t coordinates[3];
+ long numCoordinate;
+ coordinate_t circumCenter;
+ double circumRadius;
+} element_t;
+
+__attribute__((transaction_safe))
+double
+coordinate_distance (coordinate_t* coordinatePtr, coordinate_t* aPtr)
+{
+ return sqrt( coordinatePtr->x );
+}
+
+__attribute__((transaction_safe))
+static void
+calculateCircumCircle (element_t* elementPtr)
+{
+ long numCoordinate = elementPtr->numCoordinate;
+ coordinate_t* coordinates = elementPtr->coordinates;
+ coordinate_t* circumCenterPtr = &elementPtr->circumCenter;
+ ((void) (0));
+ if (numCoordinate == 2) {
+ circumCenterPtr->x = (coordinates[0].x + coordinates[1].x) / 2.0;
+ circumCenterPtr->y = (coordinates[0].y + coordinates[1].y) / 2.0;
+ }
+ else {
+ double ax = coordinates[0].x;
+ double ay = coordinates[0].y;
+ double bx = coordinates[1].x;
+ double by = coordinates[1].y;
+ double cx = coordinates[2].x;
+ double cy = coordinates[2].y;
+ double bxDelta = bx - ax;
+ double byDelta = by - ay;
+ double cxDelta = cx - ax;
+ double cyDelta = cy - ay;
+ double bDistance2 = (bxDelta * bxDelta) + (byDelta * byDelta);
+ double cDistance2 = (cxDelta * cxDelta) + (cyDelta * cyDelta);
+ double xNumerator = (byDelta * cDistance2) - (cyDelta * bDistance2);
+ double yNumerator = (bxDelta * cDistance2) - (cxDelta * bDistance2);
+ double denominator = 2 * ((bxDelta * cyDelta) - (cxDelta * byDelta));
+ double rx = ax - (xNumerator / denominator);
+ double ry = ay + (yNumerator / denominator);
+ circumCenterPtr->x = rx;
+ circumCenterPtr->y = ry;
+ }
+ elementPtr->circumRadius = coordinate_distance(circumCenterPtr,
+ &coordinates[0]);
+}
+
+element_t*
+element_alloc (coordinate_t* coordinates, long numCoordinate)
+{
+ element_t* elementPtr;
+ elementPtr = (element_t*)xmalloc(sizeof(element_t));
+ if (elementPtr) {
+ calculateCircumCircle(elementPtr);
+ }
+ return elementPtr;
+}
+
+__attribute__((transaction_safe))
+element_t*
+TMelement_alloc (coordinate_t* coordinates, long numCoordinate)
+{
+ element_t* elementPtr;
+ elementPtr = (element_t*)xmalloc(sizeof(element_t));
+ if (elementPtr) {
+ calculateCircumCircle(elementPtr);
+ }
+ return elementPtr;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20100615-2.c b/gcc/testsuite/gcc.dg/tm/20100615-2.c
new file mode 100644
index 00000000000..4341e7d35ea
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100615-2.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+__attribute__((transaction_safe))
+void Info_RemoveKey (char *s)
+{
+ char *o = 0;
+ while (1)
+ {
+ s++;
+ while (*s)
+ {
+ if (!*s)
+ return;
+ *o++ = *s++;
+ }
+ *o = 0;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20100615.c b/gcc/testsuite/gcc.dg/tm/20100615.c
new file mode 100644
index 00000000000..3d9e4684e5d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20100615.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+/* Since the non-TM version of new_node() gets optimized away, it

+ shouldn't appear in the clone table either. */
+/* { dg-final { scan-assembler-not "tm_clone_table" } } */
+
+#define NULL 0
+extern void *malloc (__SIZE_TYPE__);
+
+__attribute__((transaction_pure))
+void exit(int status);
+
+typedef struct node {
+} node_t;
+
+__attribute__((transaction_safe))
+static node_t *new_node(node_t *next)
+{
+ node_t *node;
+ node = (node_t *)malloc(sizeof(node_t));
+ if (node == NULL) {
+ exit(1);
+ }
+ return NULL;
+}
+
+static node_t *set_new()
+{
+ node_t *min, *max;
+ __transaction_atomic {
+ max = new_node(NULL);
+ min = new_node(max);
+ }
+ return min;
+}
+
+int main(int argc, char **argv)
+{
+ set_new();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/20110216.c b/gcc/testsuite/gcc.dg/tm/20110216.c
new file mode 100644
index 00000000000..22edae0fb4c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/20110216.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+int george;
+
+__attribute__((transaction_callable))
+void q1()
+{
+ __transaction_atomic {
+ george=999;
+ }
+ q1();
+}
+
+/* { dg-final { scan-assembler-not "_ITM_getTMCloneOrIrrevocable" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/alias-1.c b/gcc/testsuite/gcc.dg/tm/alias-1.c
new file mode 100644
index 00000000000..364aa714086
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/alias-1.c
@@ -0,0 +1,40 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-ealias -O" } */
+
+typedef __UINTPTR_TYPE__ ptrcast;
+
+#if (__SIZEOF_POINTER__ == 4)
+#define TM_LOAD __builtin__ITM_RU4
+#define TM_STORE __builtin__ITM_WU4
+#elif (__SIZEOF_POINTER__ == 8)
+#define TM_LOAD __builtin__ITM_RU8
+#define TM_STORE __builtin__ITM_WU8
+#else
+#error Add target support here
+#endif
+
+struct mystruct_type {
+ ptrcast *ptr;
+} *mystruct;
+
+ptrcast *someptr, **pp;
+ptrcast ui;
+
+void f(void)
+{
+ __transaction_atomic {
+ ui = TM_LOAD (&mystruct);
+ mystruct = (struct mystruct_type *) ui;
+ ui = TM_LOAD (&someptr);
+ someptr = (ptrcast *) ui;
+ ui = (ptrcast) someptr;
+ pp = &mystruct->ptr;
+ TM_STORE (pp, ui);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "mystruct = \{ .*ESCAPED" 1 "ealias" } } */
+/* { dg-final { scan-tree-dump-times "someptr = same as mystruct" 1 "ealias" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "ui\..* = same as mystruct" 1 "ealias" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "pp\..* = same as mystruct" 1 "ealias" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "ealias" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/alias-2.c b/gcc/testsuite/gcc.dg/tm/alias-2.c
new file mode 100644
index 00000000000..761a99c2ab0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/alias-2.c
@@ -0,0 +1,42 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-ealias -O" } */
+
+typedef __UINTPTR_TYPE__ ptrcast;
+
+#if (__SIZEOF_POINTER__ == 4)
+#define TM_LOAD __builtin__ITM_RU4
+#define TM_STORE __builtin__ITM_WU4
+#elif (__SIZEOF_POINTER__ == 8)
+#define TM_LOAD __builtin__ITM_RU8
+#define TM_STORE __builtin__ITM_WU8
+#else
+#error Add target support here
+#endif
+
+void candy ();
+
+struct mystruct_type {
+ ptrcast *ptr;
+} *mystruct, *mystruct2;
+
+ptrcast *someptr, **pp;
+ptrcast ui;
+
+void tootsie_roll () __attribute__((transaction_wrap (candy)));
+void tootsie_roll ()
+{
+ ui = TM_LOAD (&mystruct);
+ mystruct2 = (struct mystruct_type *) ui;
+
+ pp = &mystruct2->ptr;
+}
+
+void foo()
+{
+ candy();
+}
+
+/* { dg-final { scan-tree-dump-times "ui\..* = same as mystruct" 1 "ealias" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "mystruct.*ESCAPED" 1 "ealias" } } */
+/* { dg-final { scan-tree-dump-times "pp = same as mystruct" 1 "ealias" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "ealias" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/data-1.c b/gcc/testsuite/gcc.dg/tm/data-1.c
new file mode 100644
index 00000000000..16061476e08
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/data-1.c
@@ -0,0 +1,48 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+/* Test read and write on all basic types. */
+
+static char gc;
+static signed char gsc;
+static unsigned char guc;
+
+static short gs;
+static unsigned short gus;
+
+static int gi;
+static unsigned int gui;
+
+static long gl;
+static unsigned long gul;
+
+static long long gll;
+static unsigned long long gull;
+
+static float gf;
+static double gd;
+static long double gld;
+
+void f(void)
+{
+ __transaction_atomic {
+ gc++;
+ gsc++;
+ guc++;
+
+ gs++;
+ gus++;
+
+ gi++;
+ gui++;
+
+ gl++;
+ gul++;
+
+ gll++;
+ gull++;
+
+ gf++;
+ gd++;
+ gld++;
+ }
+}
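
Each increment above is lowered by the tmmark pass to sized libitm barriers chosen by the operand's width (for instance _ITM_RU4/_ITM_WU4 for a 32-bit int and _ITM_RU8/_ITM_WU8 for a 64-bit long). A hedged sketch of the shape of that transformation, with invented identifiers and the exact sizes depending on the target:

/* Hypothetical example: for a 4-byte int the body below is rewritten
   roughly as  _ITM_WU4 (&counter, _ITM_RU4 (&counter) + 1);  */
static int counter;

void bump (void)
{
  __transaction_atomic {
    counter++;
  }
}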
diff --git a/gcc/testsuite/gcc.dg/tm/data-2.c b/gcc/testsuite/gcc.dg/tm/data-2.c
new file mode 100644
index 00000000000..3e2a604bfb3
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/data-2.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+/* Test read and write on an aggregate type. */
+
+struct S
+{
+ int x[10];
+};
+
+static struct S g;
+
+extern void fill (struct S *);
+
+void f(void)
+{
+ struct S l;
+ fill(&l);
+
+ __transaction_atomic {
+ g = l;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/debug-1.c b/gcc/testsuite/gcc.dg/tm/debug-1.c
new file mode 100644
index 00000000000..fae5d6bed42
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/debug-1.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O0 -fdump-tree-tmmark-lineno" } */
+
+/* Test that instrumented statements have correct location info. */
+
+int a,b, c, z;
+
+testing(){
+ c=9;
+}
+
+main() {
+ b = 9898;
+ __transaction_relaxed {
+ z = c;
+ a = 888;
+ testing();
+ }
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times ": 13:.*b = 9898" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times ": 14:.*__transaction" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times ": 15:.*ITM_WU. \\(&z" 1 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times ": 16:.*ITM_WU. \\(&a" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/indirect-1.c b/gcc/testsuite/gcc.dg/tm/indirect-1.c
new file mode 100644
index 00000000000..eade848bd8b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/indirect-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void foo(void (*fn)(void))
+{
+ __transaction_relaxed {
+ fn();
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/ipa-1.c b/gcc/testsuite/gcc.dg/tm/ipa-1.c
new file mode 100644
index 00000000000..ec1cdca7032
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/ipa-1.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark-asmname" } */
+
+void foo(void) __attribute__((transaction_safe));
+
+void bar(void)
+{
+ __transaction_atomic {
+ foo();
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "_ZGTt3foo" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/ipa-2.c b/gcc/testsuite/gcc.dg/tm/ipa-2.c
new file mode 100644
index 00000000000..e7a02cb1926
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/ipa-2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark-asmname" } */
+
+void foo(void);
+
+void bar(void)
+{
+ __transaction_relaxed {
+ foo();
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "_ZGTt3foo" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/ipa-3.c b/gcc/testsuite/gcc.dg/tm/ipa-3.c
new file mode 100644
index 00000000000..cb1b433bcbd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/ipa-3.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+static int x;
+
+void __attribute__((transaction_callable))
+foo(void)
+{
+ x++;
+}
+
+/* { dg-final { scan-assembler "_ZGTt3foo" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-1.c b/gcc/testsuite/gcc.dg/tm/irrevocable-1.c
new file mode 100644
index 00000000000..60f629133ee
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-1.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+int global;
+int george;
+
+extern crap() __attribute__((transaction_unsafe));
+
+foo()
+{
+ __transaction_relaxed {
+ global++;
+ crap();
+ george++;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-2.c b/gcc/testsuite/gcc.dg/tm/irrevocable-2.c
new file mode 100644
index 00000000000..17ac8a5f0c0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-2.c
@@ -0,0 +1,21 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmedge" } */
+
+/* Test that a direct call to __builtin__ITM_changeTransactionMode()
+ sets the irrevocable bit. */
+
+int global;
+int george;
+
+foo()
+{
+ __transaction_relaxed {
+ global++;
+ __builtin__ITM_changeTransactionMode (0);
+ george++;
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "doesGoIrrevocable" 1 "tmedge" } } */
+/* { dg-final { scan-tree-dump-times "hasNoIrrevocable" 0 "tmedge" } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-3.c b/gcc/testsuite/gcc.dg/tm/irrevocable-3.c
new file mode 100644
index 00000000000..c0854794803
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark" } */
+
+extern void bar(void) __attribute__((transaction_callable));
+
+foo()
+{
+ __transaction_relaxed {
+ bar();
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "GTMA_MAY_ENTER_IRREVOCABLE" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-4.c b/gcc/testsuite/gcc.dg/tm/irrevocable-4.c
new file mode 100644
index 00000000000..ee759b84ef0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-4.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmmark" } */
+
+void orig(void);
+void xyz(void) __attribute__((transaction_wrap (orig)));
+
+
+foo()
+{
+ __transaction_relaxed {
+ orig();
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "GTMA_MAY_ENTER_IRREVOCABLE" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-5.c b/gcc/testsuite/gcc.dg/tm/irrevocable-5.c
new file mode 100644
index 00000000000..155879f1a21
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-5.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-ipa-tmipa -O" } */
+
+int a;
+
+void foo(void) __attribute__((transaction_safe));
+void bar(void) __attribute__((transaction_safe));
+void danger(void) __attribute__((transaction_unsafe));
+
+void wildthing()
+{
+ /* All blocks should be propagated as irrevocable. */
+ __transaction_relaxed {
+ if (a)
+ foo();
+ else
+ bar();
+ danger();
+ }
+}
+
+/* { dg-final { scan-ipa-dump-times "GTMA_DOES_GO_IRREVOCABLE" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 3 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 4 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 5 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 6 goes irr" 1 "tmipa" } } */
+/* { dg-final { cleanup-ipa-dump "tmipa" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-6.c b/gcc/testsuite/gcc.dg/tm/irrevocable-6.c
new file mode 100644
index 00000000000..2399131210b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-6.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-ipa-tmipa -O" } */
+
+int a, trxn, eee;
+
+void foo(void) __attribute__((transaction_safe));
+void bar(void) __attribute__((transaction_safe));
+void danger(void) __attribute__((transaction_unsafe));
+
+void wildthing()
+{
+ /* All blocks should be propagated as irrevocable. */
+ __transaction_relaxed {
+ if (eee) {
+ if (a)
+ foo();
+ else
+ bar();
+ danger();
+ } else {
+ danger();
+ }
+ }
+}
+
+/* { dg-final { scan-ipa-dump-times "GTMA_DOES_GO_IRREVOCABLE" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 3 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 4 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 5 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 6 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 7 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 8 goes irr" 1 "tmipa" } } */
+/* { dg-final { scan-ipa-dump-times "bb 9 goes irr" 1 "tmipa" } } */
+/* { dg-final { cleanup-ipa-dump "tmipa" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/irrevocable-7.c b/gcc/testsuite/gcc.dg/tm/irrevocable-7.c
new file mode 100644
index 00000000000..ea8a00f0c55
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/irrevocable-7.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-ipa-tmipa" } */
+
+extern void bark(void);
+
+__attribute__((transaction_callable))
+int foo()
+{
+ bark();
+}
+
+/* { dg-final { scan-ipa-dump-times "changeTransactionMode \\(0\\)" 1 "tmipa" } } */
+/* { dg-final { cleanup-ipa-dump "tmipa" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-1.c b/gcc/testsuite/gcc.dg/tm/memopt-1.c
new file mode 100644
index 00000000000..5388a81e282
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-1.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmemopt" } */
+
+long g, xxx, yyy;
+extern george() __attribute__((transaction_callable));
+extern ringo(long int);
+int i;
+
+f()
+{
+ __transaction_relaxed {
+ g = 666;
+ george();
+ if (i == 9)
+ goto bye;
+ xxx=8;
+ yyy=9;
+ for (i=0; i < 10; ++i)
+ ringo(g);
+ bye:
+ ringo(g);
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "transforming: .*_ITM_RaWU8 \\(&g\\);" 1 "tmmemopt" } } */
+/* { dg-final { scan-tree-dump-times "transforming: .*_ITM_WaRU4 \\(&i," 1 "tmmemopt" } } */
+/* { dg-final { scan-tree-dump-times "transforming: .*_ITM_RaWU4 \\(&i\\);" 1 "tmmemopt" } } */
+/* { dg-final { scan-tree-dump-times "transforming: .*_ITM_WaWU4 \\(&i," 1 "tmmemopt" } } */
+/* { dg-final { cleanup-tree-dump "tmmemopt" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-10.c b/gcc/testsuite/gcc.dg/tm/memopt-10.c
new file mode 100644
index 00000000000..5caa6b53d6b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-10.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+extern int something(void) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+int f()
+{
+ int *p;
+
+ p = malloc (sizeof (*p) * 100);
+
+ __transaction_atomic {
+ /* p[5] is thread private, but not transaction local since the
+ malloc is outside of the transaction. We can use the logging
+ functions for this. */
+ p[5] = 123;
+
+ if (something())
+ __transaction_cancel;
+ }
+ return p[5];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_LU" 0 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "ITM_WU" 0 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "tm_save" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-11.c b/gcc/testsuite/gcc.dg/tm/memopt-11.c
new file mode 100644
index 00000000000..07972a4fd4e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-11.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+extern int something(void) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+int f()
+{
+ int *p;
+
+ p = malloc (sizeof (*p) * 100);
+ foo(p[5]);
+
+ __transaction_atomic {
+ /* p[5] is thread private; however, the SSA_NAME that holds the
+ address dominates the entire transaction (it is transaction
+ invariant), so we can use a save/restore pair. */
+ p[5] = 123;
+
+ if (something())
+ __transaction_cancel;
+ }
+ return p[5];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_LU" 0 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "ITM_WU" 0 "tmmark" } } */
+/* { dg-final { scan-tree-dump-times "tm_save" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-12.c b/gcc/testsuite/gcc.dg/tm/memopt-12.c
new file mode 100644
index 00000000000..5520ecef27a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-12.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+extern int test(void) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+struct large { int foo[500]; };
+
+int f()
+{
+ int *p1, *p2, *p3;
+
+ p1 = malloc (sizeof (*p1)*5000);
+ __transaction_atomic {
+ *p1 = 0;
+
+ p2 = malloc (sizeof (*p2)*6000);
+ *p2 = 1;
+
+ /* p3 = PHI (p1, p2) */
+ if (test())
+ p3 = p1;
+ else
+ p3 = p2;
+
+ /* Since both p1 and p2 are thread-private, we can inherit the
+ logging already done. No ITM_W* instrumentation necessary. */
+ *p3 = 555;
+ }
+ return p3[something()];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_WU" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-13.c b/gcc/testsuite/gcc.dg/tm/memopt-13.c
new file mode 100644
index 00000000000..6e93b7feaea
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-13.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+struct large { int x[100]; };
+struct large large_global;
+extern struct large function (void) __attribute__((transaction_safe));
+
+void f()
+{
+ __transaction_atomic {
+ large_global = function();
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "memmoveRtWt \\\(&large_global," 1 "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-15.c b/gcc/testsuite/gcc.dg/tm/memopt-15.c
new file mode 100644
index 00000000000..975c794337c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-15.c
@@ -0,0 +1,30 @@
+/* { dg-do compile { target { x86_64-*-linux* } } } */
+/* { dg-options "-fgnu-tm -O" } */
+
+/* Test the TM vector logging functions. */
+
+typedef int __attribute__((vector_size (16))) vectype;
+extern int something(void) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+vectype vecky;
+
+vectype f()
+{
+ vectype *p;
+
+ p = malloc (sizeof (*p) * 100);
+
+ __transaction_atomic {
+ /* p[5] is thread private, but not transaction local since the
+ malloc is outside of the transaction. We can use the logging
+ functions for this. */
+ p[5] = vecky;
+
+ if (something())
+ __transaction_cancel;
+ }
+ return p[5];
+}
+
+/* { dg-final { scan-assembler "_ITM_LM128" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-2.c b/gcc/testsuite/gcc.dg/tm/memopt-2.c
new file mode 100644
index 00000000000..08aa9acdcbc
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-2.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmemopt" } */
+
+char c;
+
+void f(void)
+{
+ __transaction_atomic {
+ ++c;
+ }
+}
+
+/* { dg-final { scan-tree-dump-times "transforming.*RfWU1 \\(&c" 1 "tmmemopt" } } */
+/* { dg-final { scan-tree-dump-times "transforming.*WaWU1 \\(&c" 1 "tmmemopt" } } */
+/* { dg-final { cleanup-tree-dump "tmmemopt" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-3.c b/gcc/testsuite/gcc.dg/tm/memopt-3.c
new file mode 100644
index 00000000000..62a3e0e9f2a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-3.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+struct large { int x[100]; };
+extern int test(void) __attribute__((transaction_safe));
+
+int f()
+{
+ int i = readint();
+ struct large lala = { 0 };
+ __transaction_atomic {
+ lala.x[i] = 666;
+ if (test())
+ __transaction_cancel;
+ }
+ return lala.x[0];
+}
+
+/* { dg-final { scan-tree-dump-times "logging: lala.x\\\[i_1\\\]" 1 "tmmark" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-4.c b/gcc/testsuite/gcc.dg/tm/memopt-4.c
new file mode 100644
index 00000000000..92849718441
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-4.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmedge" } */
+
+/* Test thread-local memory optimizations: save/restore pairs. */
+
+struct large { int x[100]; };
+struct large bark();
+extern int test (void) __attribute__((transaction_safe));
+
+int f()
+{
+ int i = readint();
+ struct large lala = bark();
+ __transaction_atomic {
+ lala.x[55] = 666;
+ if (test())
+ __transaction_cancel;
+ }
+ return lala.x[i];
+}
+
+/* { dg-final { scan-tree-dump-times "tm_save.\[0-9_\]+ = lala.x\\\[55\\\]" 1 "tmedge" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "lala.x\\\[55\\\] = tm_save" 1 "tmedge" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-5.c b/gcc/testsuite/gcc.dg/tm/memopt-5.c
new file mode 100644
index 00000000000..7b377a58035
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-5.c
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmedge --param tm-max-aggregate-size=1" } */
+
+/* Test thread-local memory optimizations: logging function. */
+
+struct large { int x[100]; };
+struct large bark();
+extern int test (void) __attribute__((transaction_safe));
+
+int f()
+{
+ int i = readint();
+ struct large lala = bark();
+ __transaction_atomic {
+ lala.x[55] = 666;
+ if (test())
+ __transaction_cancel;
+ }
+ return lala.x[i];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_LU\[0-9\] \\\(&lala.x\\\[55\\\]" 1 "tmedge" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-6.c b/gcc/testsuite/gcc.dg/tm/memopt-6.c
new file mode 100644
index 00000000000..f4343736772
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-6.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmedge --param tm-max-aggregate-size=1" } */
+
+struct large { int x[100]; };
+struct large bark();
+extern int test (void) __attribute__((transaction_safe));
+struct large lacopy;
+
+int f()
+{
+ int i = readint();
+ struct large lala = bark();
+ __transaction_atomic {
+ lala.x[55] = 666;
+ lala = lacopy; /* Aggregate instrumentation. */
+ }
+ return lala.x[i];
+}
+
+/* { dg-final { scan-tree-dump-times "memmoveRtWt \\\(&lala, &lacopy" 1 "tmedge" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-7.c b/gcc/testsuite/gcc.dg/tm/memopt-7.c
new file mode 100644
index 00000000000..f8af2a95e66
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-7.c
@@ -0,0 +1,22 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmedge --param tm-max-aggregate-size=999" } */
+
+/* Test save/restore pairs for aggregates. */
+
+struct large { int x[100]; };
+extern struct large foobie (void) __attribute__((transaction_safe));
+int asdf;
+
+int f()
+{
+ struct large lala;
+ struct large lacopy = foobie();
+ __transaction_atomic {
+ lala = lacopy;
+ }
+ return lala.x[asdf];
+}
+
+/* { dg-final { scan-tree-dump-times "tm_save.\[0-9_\]+ = lala" 1 "tmedge" { xfail *-*-* } } } */
+/* { dg-final { scan-tree-dump-times "lala = tm_save" 1 "tmedge" { xfail *-*-* } } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-8.c b/gcc/testsuite/gcc.dg/tm/memopt-8.c
new file mode 100644
index 00000000000..10320e78950
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-8.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+extern int something(void) __attribute__((transaction_safe));
+extern int escape(int *) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+int f()
+{
+ int *p;
+
+ __transaction_atomic {
+ p = malloc (sizeof (*p) * 100);
+ escape (p);
+
+ /* This should be instrumented because P escapes. */
+ p[5] = 123;
+
+ if (something())
+ __transaction_cancel;
+ }
+ return p[5];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_WU" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memopt-9.c b/gcc/testsuite/gcc.dg/tm/memopt-9.c
new file mode 100644
index 00000000000..0c34f20b926
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memopt-9.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O -fdump-tree-tmmark" } */
+
+extern int something(void) __attribute__((transaction_safe));
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc,transaction_safe));
+
+struct large { int foo[500]; };
+
+int f()
+{
+ int *p;
+ struct large *lp;
+
+ __transaction_atomic {
+ p = malloc (sizeof (*p) * 100);
+ lp = malloc (sizeof (*lp) * 100);
+
+ /* No instrumentation necessary; P and LP are transaction local. */
+ p[5] = 123;
+ lp->foo[66] = 123;
+
+ if (something())
+ __transaction_cancel;
+ }
+ return p[5];
+}
+
+/* { dg-final { scan-tree-dump-times "ITM_WU" 0 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memset-2.c b/gcc/testsuite/gcc.dg/tm/memset-2.c
new file mode 100644
index 00000000000..743ada13783
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memset-2.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmlower" } */
+
+char array[4];
+
+void *memset(void *s, int c, __SIZE_TYPE__);
+
+int main()
+{
+ __transaction_atomic {
+ memset(array, 'b', sizeof(4));
+ }
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "GTMA_HAVE_STORE" 1 "tmlower" } } */
+/* { dg-final { cleanup-tree-dump "tmlower" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/memset.c b/gcc/testsuite/gcc.dg/tm/memset.c
new file mode 100644
index 00000000000..3b73ec6ee74
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/memset.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+extern void *memset (void *, int, __SIZE_TYPE__);
+
+char array[4] = "aaaa";
+
+__attribute__((transaction_safe))
+void *my_memset()
+{
+ return memset(array,'b',4);
+}
+
+
+int main()
+{
+
+ __transaction_atomic {
+ my_memset();
+ }
+ return 0;
+}
+
+/* { dg-final { scan-assembler "_ITM_memsetW" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/nested-1.c b/gcc/testsuite/gcc.dg/tm/nested-1.c
new file mode 100644
index 00000000000..afadb83c183
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/nested-1.c
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+extern int foo(int) __attribute__((transaction_safe));
+void bar(void)
+{
+ __transaction_atomic {
+ if (foo(1))
+ __transaction_atomic {
+ if (foo(2))
+ __transaction_atomic {
+ if (foo(3))
+ __transaction_atomic {
+ if (foo(4))
+ foo(5);
+ else
+ __transaction_cancel;
+ }
+ else
+ __transaction_cancel;
+ }
+ else
+ __transaction_cancel;
+ }
+ else
+ __transaction_cancel;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/nested-2.c b/gcc/testsuite/gcc.dg/tm/nested-2.c
new file mode 100644
index 00000000000..205ca8d7eac
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/nested-2.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+void foobar(void)
+{
+ __transaction_atomic {
+ foobar();
+ }
+}
+
+void doit(void) __attribute__((transaction_safe));
+
+__attribute__((transaction_callable))
+void callable(void)
+{
+ __transaction_atomic {
+ doit();
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/opt-1.c b/gcc/testsuite/gcc.dg/tm/opt-1.c
new file mode 100644
index 00000000000..87a8c72bd43
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/opt-1.c
@@ -0,0 +1,43 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+extern void usleep (int) __attribute__((transaction_pure));
+extern int rand(void) __attribute__((pure, transaction_pure));
+extern int printf (const char *, ...);
+extern void *malloc (__SIZE_TYPE__) __attribute__((malloc));
+extern void xyzzy (void * (*)(void *));
+
+typedef struct
+{
+ int id;
+} parm;
+
+int gvar;
+
+void *hello(void *arg)
+{
+ parm *p=(parm *)arg;
+ int tmp = p->id;
+ int tmp3;
+ printf ("Thread reads %d.\n", tmp);
+ __transaction_atomic
+ {
+ int tmp2 = gvar;
+ usleep ((int) (10.0*rand()/(10+1.0))/100);
+ gvar = tmp + tmp2;
+ tmp3 = gvar;
+ }
+ printf("tmp3 = %d\n", tmp3);
+ return 0;
+}
+
+int
+main()
+{
+ int i, n = rand();
+
+ for (i=0; i<n; i++)
+ xyzzy (hello);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/opt-2.c b/gcc/testsuite/gcc.dg/tm/opt-2.c
new file mode 100644
index 00000000000..d9e2b8a6db0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/opt-2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O2" } */
+
+int foo(int *arr, int v)
+{
+ int r = 0;
+ int i;
+ __transaction_atomic {
+ for (i = 0; i < 10; ++i)
+ if (arr[i] < 27)
+ r += arr[i] += v;
+ }
+ return r;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/pr45985.c b/gcc/testsuite/gcc.dg/tm/pr45985.c
new file mode 100644
index 00000000000..c8118406776
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr45985.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+__attribute__((transaction_unsafe))
+void illegal();
+
+static int a = 0;
+void func()
+{
+ __transaction_relaxed {
+ if( a == 0)
+ illegal();
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/pr46567-2.c b/gcc/testsuite/gcc.dg/tm/pr46567-2.c
new file mode 100644
index 00000000000..bfe0078bfdb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr46567-2.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+int funky();
+int global;
+
+void SeqfileGetLine()
+{
+ funky();
+}
+
+__attribute__((transaction_callable)) void readLoop()
+{
+ SeqfileGetLine();
+ if (global)
+ funky();
+
+}
diff --git a/gcc/testsuite/gcc.dg/tm/pr46567.c b/gcc/testsuite/gcc.dg/tm/pr46567.c
new file mode 100644
index 00000000000..bcc59adf509
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr46567.c
@@ -0,0 +1,18 @@
+__attribute__((transaction_callable))
+static void SeqfileGetLine ()
+{
+ SSIGetFilePosition ();
+}
+
+__attribute__((transaction_callable))
+static void readLoop (int addfirst)
+{
+ if (!addfirst)
+ {
+ if (!addfirst)
+ {
+ SSIGetFilePosition ();
+ }
+ SeqfileGetLine ();
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/pr46654.c b/gcc/testsuite/gcc.dg/tm/pr46654.c
new file mode 100644
index 00000000000..bb63b685844
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr46654.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+extern void baz(int);
+
+int y;
+void foo(volatile int x)
+{
+ __transaction_atomic {
+ x = 5; /* { dg-error "invalid volatile use of 'x' inside transaction" } */
+ x += y;
+ y++;
+ }
+ baz(x);
+}
+
+
+volatile int i = 0;
+
+void george()
+{
+ __transaction_atomic {
+ if (i == 2) /* { dg-error "invalid volatile use of 'i' inside transaction" } */
+ i = 1;
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/pr47520.c b/gcc/testsuite/gcc.dg/tm/pr47520.c
new file mode 100644
index 00000000000..80b976bd4cb
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr47520.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O" } */
+
+struct ReadSeqVars
+{
+ int format;
+ char *ss;
+};
+
+void rms_feof(struct ReadSeqVars *);
+
+__attribute__((transaction_callable)) int ReadSeq(struct ReadSeqVars *V)
+{
+ if (V->format > 1)
+ {
+ if ((V->format != 2) && (V->ss != (void*)0) )
+ {
+ V->format = 3;
+ }
+ }
+ else
+ {
+ int i = 0;
+ for (i = 0; i < 1; i++)
+ {
+ }
+ rms_feof(V);
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/tm/pr47690.c b/gcc/testsuite/gcc.dg/tm/pr47690.c
new file mode 100644
index 00000000000..d18e2e11fb8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr47690.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+int george;
+
+void q1()
+{
+ __transaction_atomic {
+ george=999;
+ }
+ q1();
+}
+
+/* { dg-final { scan-assembler-not "ZGTt2q1" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/pr47905.c b/gcc/testsuite/gcc.dg/tm/pr47905.c
new file mode 100644
index 00000000000..c4b254930c1
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/pr47905.c
@@ -0,0 +1,14 @@
+/* { dg-do compile }
+ { dg-options "-fgnu-tm" } */
+
+void funcA();
+void funcB();
+
+void *thread()
+{
+ __transaction_relaxed
+ {
+ funcA();
+ };
+ funcB();
+}
diff --git a/gcc/testsuite/gcc.dg/tm/props-1.c b/gcc/testsuite/gcc.dg/tm/props-1.c
new file mode 100644
index 00000000000..89690240486
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/props-1.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmedge -fdump-tree-tmlower" } */
+
+int global;
+
+foo(int local)
+{
+ __transaction_atomic {
+ local++;
+ if (++global == 10)
+ __transaction_cancel;
+ }
+}
+
+/* { dg-final { scan-tree-dump-times " instrumentedCode" 1 "tmedge" } } */
+/* { dg-final { scan-tree-dump-times "hasNoAbort" 0 "tmedge" } } */
+/* { dg-final { scan-tree-dump-times "GTMA_HAVE_ABORT" 1 "tmlower" } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
+/* { dg-final { cleanup-tree-dump "tmlower" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/props-2.c b/gcc/testsuite/gcc.dg/tm/props-2.c
new file mode 100644
index 00000000000..56a3ffa1367
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/props-2.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-ipa-tmipa" } */
+
+/* Test that irrevocability gets set for the obvious case. */
+
+int global;
+int george;
+
+extern crap() __attribute__((transaction_unsafe));
+
+foo(){
+ __transaction_relaxed {
+ global++;
+ crap();
+ george++;
+ }
+}
+
+/* { dg-final { scan-ipa-dump-times "GTMA_MAY_ENTER_IRREVOCABLE" 1 "tmipa" } } */
+/* { dg-final { cleanup-ipa-dump "tmipa" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/props-3.c b/gcc/testsuite/gcc.dg/tm/props-3.c
new file mode 100644
index 00000000000..48f2230cdd2
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/props-3.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-ipa-tmipa" } */
+
+/* Test that indirect calls set the irrevocable bit. */
+
+void (*indirect)(void);
+
+foo(){
+ __transaction_relaxed {
+ (*indirect)();
+ }
+}
+
+/* { dg-final { scan-ipa-dump-times "GTMA_MAY_ENTER_IRREVOCABLE" 1 "tmipa" } } */
+/* { dg-final { cleanup-ipa-dump "tmipa" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/props-4.c b/gcc/testsuite/gcc.dg/tm/props-4.c
new file mode 100644
index 00000000000..c9d0c2b2887
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/props-4.c
@@ -0,0 +1,27 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-tmedge -fdump-tree-tmmark" } */
+
+int a, b;
+
+void __attribute((transaction_may_cancel_outer,noinline)) cancel1()
+{
+ __transaction_cancel [[outer]];
+}
+
+void
+foo(void)
+{
+ __transaction_atomic [[outer]] {
+ a = 2;
+ __transaction_atomic {
+ b = 2;
+ cancel1();
+ }
+ }
+}
+
+/* { dg-final { scan-tree-dump-times " instrumentedCode" 1 "tmedge" } } */
+/* { dg-final { scan-tree-dump-times "hasNoAbort" 0 "tmedge" } } */
+/* { dg-final { scan-tree-dump-times "LABEL=<L0>" 1 "tmmark" } } */
+/* { dg-final { cleanup-tree-dump "tmedge" } } */
+/* { dg-final { cleanup-tree-dump "tmmark" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/tm.exp b/gcc/testsuite/gcc.dg/tm/tm.exp
new file mode 100644
index 00000000000..3d24481d5a8
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/tm.exp
@@ -0,0 +1,39 @@
+# Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# GCC testsuite that uses the `dg.exp' driver.
+
+# Load support procs.
+load_lib gcc-dg.exp
+
+# If a testcase doesn't have special options, use these.
+global DEFAULT_CFLAGS
+if ![info exists DEFAULT_CFLAGS] then {
+ set DEFAULT_CFLAGS " -ansi -pedantic-errors"
+}
+
+# Initialize `dg'.
+dg-init
+
+# Run the tests that are shared with C++ testing.
+dg-runtest [lsort [glob -nocomplain $srcdir/c-c++-common/tm/*c]] \
+ "" $DEFAULT_CFLAGS
+# Run the C-only tests.
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] \
+ "" $DEFAULT_CFLAGS
+
+# All done.
+dg-finish
diff --git a/gcc/testsuite/gcc.dg/tm/unsafe.c b/gcc/testsuite/gcc.dg/tm/unsafe.c
new file mode 100644
index 00000000000..824368a1a34
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/unsafe.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+__attribute__((transaction_callable))
+static int func ()
+{
+ return 12345;
+}
+
+int main()
+{
+ __transaction_atomic { return func(); } /* { dg-error "unsafe function call .func. " } */
+}
diff --git a/gcc/testsuite/gcc.dg/tm/unused.c b/gcc/testsuite/gcc.dg/tm/unused.c
new file mode 100644
index 00000000000..7c8aa3e778d
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/unused.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -Wall" } */
+
+__attribute__((transaction_safe))
+static int unused_func () /* { dg-warning "defined but not used" } */
+{
+ return 12345;
+}
+
+int main()
+{
+ return 0;
+}
+
+/* { dg-final { scan-assembler "_ZGTt11unused_func:" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/vector-1.c b/gcc/testsuite/gcc.dg/tm/vector-1.c
new file mode 100644
index 00000000000..2dc43b0a517
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/vector-1.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -O3" } */
+
+/* On x86-64, the vectorizer creates V2DI uses which we must handle.
+ Similarly for other vector architectures. */
+
+void ** newElements;
+
+__attribute__((transaction_safe))
+long
+TMqueue_push (void** queuePtr)
+{
+ long src;
+ for (src = 1; src < 9; src++) {
+ newElements[src+1] = queuePtr[src];
+ }
+ return 1;
+}
diff --git a/gcc/testsuite/gcc.dg/tm/wrap-2.c b/gcc/testsuite/gcc.dg/tm/wrap-2.c
new file mode 100644
index 00000000000..29486335a44
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/wrap-2.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm" } */
+
+#define W(X) __attribute__((transaction_wrap(X)))
+void f1(void);
+void f2(int);
+int i3;
+int f7(void);
+
+void g1(void) W(f1);
+void g2(void) W(f2); /* { dg-error "is not compatible" } */
+void g3(void) W(i3); /* { dg-error "is not a function" } */
+void g4(void) W(f4); /* { dg-error "is not a function" } */
+void g5(void) W(1); /* { dg-error "not an identifier" } */
+void g6(void) W("f1"); /* { dg-error "not an identifier" } */
+void g7(void) W(f7); /* { dg-error "is not compatible" } */
diff --git a/gcc/testsuite/gcc.dg/tm/wrap-3.c b/gcc/testsuite/gcc.dg/tm/wrap-3.c
new file mode 100644
index 00000000000..0734436809f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/wrap-3.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-optimized" } */
+
+void free (void *);
+void wrapper (void *) __attribute__((transaction_wrap (free)));
+void *p;
+
+void foo()
+{
+ __transaction_relaxed { free (p); }
+}
+
+/* { dg-final { scan-tree-dump-times "free" 0 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/testsuite/gcc.dg/tm/wrap-4.c b/gcc/testsuite/gcc.dg/tm/wrap-4.c
new file mode 100644
index 00000000000..9e1e70c544f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tm/wrap-4.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fgnu-tm -fdump-tree-optimized -O2" } */
+
+static void candy() { candycane(); }
+
+static void tootsie_roll () __attribute__((transaction_wrap (candy)));
+static void tootsie_roll () { bark(); }
+
+void foo()
+{
+ __transaction_relaxed { candy(); }
+}
+
+/* { dg-final { scan-tree-dump-times "candy" 0 "optimized" } } */
+/* { dg-final { cleanup-tree-dump "optimized" } } */
diff --git a/gcc/timevar.def b/gcc/timevar.def
index 5ad0f9dd08c..d4a60fceefa 100644
--- a/gcc/timevar.def
+++ b/gcc/timevar.def
@@ -184,6 +184,7 @@ DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies")
DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier")
DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier")
DEFTIMEVAR (TV_TREE_SWITCH_CONVERSION, "tree switch initialization conversion")
+DEFTIMEVAR (TV_TRANS_MEM , "transactional memory")
DEFTIMEVAR (TV_TREE_STRLEN , "tree strlen optimization")
DEFTIMEVAR (TV_CGRAPH_VERIFY , "callgraph verifier")
DEFTIMEVAR (TV_DOM_FRONTIERS , "dominance frontiers")
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 86eed5d63a8..de255b4c3f7 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -599,6 +599,7 @@ compile_file (void)
output_shared_constant_pool ();
output_object_blocks ();
+ finish_tm_clone_pairs ();
/* Write out any pending weak symbol declarations. */
weak_finish ();
diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c
new file mode 100644
index 00000000000..3c0bd600943
--- /dev/null
+++ b/gcc/trans-mem.c
@@ -0,0 +1,4914 @@
+/* Passes for transactional memory support.
+ Copyright (C) 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "gimple.h"
+#include "tree-flow.h"
+#include "tree-pass.h"
+#include "tree-inline.h"
+#include "diagnostic-core.h"
+#include "demangle.h"
+#include "output.h"
+#include "trans-mem.h"
+#include "params.h"
+#include "target.h"
+#include "langhooks.h"
+#include "tree-pretty-print.h"
+#include "gimple-pretty-print.h"
+
+
+#define PROB_VERY_UNLIKELY (REG_BR_PROB_BASE / 2000 - 1)
+#define PROB_ALWAYS (REG_BR_PROB_BASE)
+
+#define A_RUNINSTRUMENTEDCODE 0x0001
+#define A_RUNUNINSTRUMENTEDCODE 0x0002
+#define A_SAVELIVEVARIABLES 0x0004
+#define A_RESTORELIVEVARIABLES 0x0008
+#define A_ABORTTRANSACTION 0x0010
+
+#define AR_USERABORT 0x0001
+#define AR_USERRETRY 0x0002
+#define AR_TMCONFLICT 0x0004
+#define AR_EXCEPTIONBLOCKABORT 0x0008
+#define AR_OUTERABORT 0x0010
+
+#define MODE_SERIALIRREVOCABLE 0x0000
+
+
+/* The representation of a transaction changes several times during the
+ lowering process. In the beginning, in the front-end we have the
+ GENERIC tree TRANSACTION_EXPR. For example,
+
+ __transaction {
+ local++;
+ if (++global == 10)
+ __tm_abort;
+ }
+
+ During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
+ trivially replaced with a GIMPLE_TRANSACTION node.
+
+ During pass_lower_tm, we examine the body of transactions looking
+ for aborts. Transactions that do not contain an abort may be
+ merged into an outer transaction. We also add a TRY-FINALLY node
+ to arrange for the transaction to be committed on any exit.
+
+ [??? Think about how this arrangement affects throw-with-commit
+ and throw-with-abort operations. In this case we want the TRY to
+ handle gotos, but not to catch any exceptions because the transaction
+ will already be closed.]
+
+ GIMPLE_TRANSACTION [label=NULL] {
+ try {
+ local = local + 1;
+ t0 = global;
+ t1 = t0 + 1;
+ global = t1;
+ if (t1 == 10)
+ __builtin___tm_abort ();
+ } finally {
+ __builtin___tm_commit ();
+ }
+ }
+
+ During pass_lower_eh, we create EH regions for the transactions,
+ intermixed with the regular EH stuff. This gives us a nice persistent
+ mapping (all the way through rtl) from transactional memory operation
+ back to the transaction, which allows us to get the abnormal edges
+ correct to model transaction aborts and restarts:
+
+ GIMPLE_TRANSACTION [label=over]
+ local = local + 1;
+ t0 = global;
+ t1 = t0 + 1;
+ global = t1;
+ if (t1 == 10)
+ __builtin___tm_abort ();
+ __builtin___tm_commit ();
+ over:
+
+ This is the end of all_lowering_passes, and so is what is present
+ during the IPA passes, and through all of the optimization passes.
+
+ During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
+ functions and mark functions for cloning.
+
+ At the end of gimple optimization, before exiting SSA form,
+ pass_tm_edges replaces statements that perform transactional
+ memory operations with the appropriate TM builtins, and swap
+ out function calls with their transactional clones. At this
+ point we introduce the abnormal transaction restart edges and
+ complete lowering of the GIMPLE_TRANSACTION node.
+
+ x = __builtin___tm_start (MAY_ABORT);
+ eh_label:
+ if (x & abort_transaction)
+ goto over;
+ local = local + 1;
+ t0 = __builtin___tm_load (global);
+ t1 = t0 + 1;
+ __builtin___tm_store (&global, t1);
+ if (t1 == 10)
+ __builtin___tm_abort ();
+ __builtin___tm_commit ();
+ over:
+*/
+
+
+/* Return the attributes we want to examine for X, or NULL if it's not
+ something we examine. We look at function types, but allow pointers
+ to function types and function decls and peek through. */
+
+static tree
+get_attrs_for (const_tree x)
+{
+ switch (TREE_CODE (x))
+ {
+ case FUNCTION_DECL:
+ return TYPE_ATTRIBUTES (TREE_TYPE (x));
+ break;
+
+ default:
+ if (TYPE_P (x))
+ return NULL;
+ x = TREE_TYPE (x);
+ if (TREE_CODE (x) != POINTER_TYPE)
+ return NULL;
+ /* FALLTHRU */
+
+ case POINTER_TYPE:
+ x = TREE_TYPE (x);
+ if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
+ return NULL;
+ /* FALLTHRU */
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ return TYPE_ATTRIBUTES (x);
+ }
+}
+
+/* Return true if X has been marked TM_PURE. */
+
+bool
+is_tm_pure (const_tree x)
+{
+ unsigned flags;
+
+ switch (TREE_CODE (x))
+ {
+ case FUNCTION_DECL:
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ break;
+
+ default:
+ if (TYPE_P (x))
+ return false;
+ x = TREE_TYPE (x);
+ if (TREE_CODE (x) != POINTER_TYPE)
+ return false;
+ /* FALLTHRU */
+
+ case POINTER_TYPE:
+ x = TREE_TYPE (x);
+ if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
+ return false;
+ break;
+ }
+
+ flags = flags_from_decl_or_type (x);
+ return (flags & ECF_TM_PURE) != 0;
+}
+
+/* Return true if X has been marked TM_IRREVOCABLE. */
+
+static bool
+is_tm_irrevocable (tree x)
+{
+ tree attrs = get_attrs_for (x);
+
+ if (attrs && lookup_attribute ("transaction_unsafe", attrs))
+ return true;
+
+ /* A call to the irrevocable builtin is, by definition,
+ irrevocable. */
+ if (TREE_CODE (x) == ADDR_EXPR)
+ x = TREE_OPERAND (x, 0);
+ if (TREE_CODE (x) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
+ && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
+ return true;
+
+ return false;
+}
+
+/* Return true if X has been marked TM_SAFE. */
+
+bool
+is_tm_safe (const_tree x)
+{
+ if (flag_tm)
+ {
+ tree attrs = get_attrs_for (x);
+ if (attrs)
+ {
+ if (lookup_attribute ("transaction_safe", attrs))
+ return true;
+ if (lookup_attribute ("transaction_may_cancel_outer", attrs))
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Return true if CALL is const, or tm_pure. */
+
+static bool
+is_tm_pure_call (gimple call)
+{
+ tree fn = gimple_call_fn (call);
+
+ if (TREE_CODE (fn) == ADDR_EXPR)
+ {
+ fn = TREE_OPERAND (fn, 0);
+ gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
+ }
+ else
+ fn = TREE_TYPE (fn);
+
+ return is_tm_pure (fn);
+}
+
+/* Return true if X has been marked TM_CALLABLE. */
+
+static bool
+is_tm_callable (tree x)
+{
+ tree attrs = get_attrs_for (x);
+ if (attrs)
+ {
+ if (lookup_attribute ("transaction_callable", attrs))
+ return true;
+ if (lookup_attribute ("transaction_safe", attrs))
+ return true;
+ if (lookup_attribute ("transaction_may_cancel_outer", attrs))
+ return true;
+ }
+ return false;
+}
+
+/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER. */
+
+bool
+is_tm_may_cancel_outer (tree x)
+{
+ tree attrs = get_attrs_for (x);
+ if (attrs)
+ return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
+ return false;
+}
+
+/* Return true for built-in functions that "end" a transaction. */
+
+bool
+is_tm_ending_fndecl (tree fndecl)
+{
+ if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_TM_COMMIT:
+ case BUILT_IN_TM_COMMIT_EH:
+ case BUILT_IN_TM_ABORT:
+ case BUILT_IN_TM_IRREVOCABLE:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Return true if STMT is a TM load. */
+
+static bool
+is_tm_load (gimple stmt)
+{
+ tree fndecl;
+
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ return false;
+
+ fndecl = gimple_call_fndecl (stmt);
+ return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
+}
+
+/* Same as above, but for simple TM loads, that is, not the
+ after-write, after-read, etc. optimized variants. */
+
+static bool
+is_tm_simple_load (gimple stmt)
+{
+ tree fndecl;
+
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ return false;
+
+ fndecl = gimple_call_fndecl (stmt);
+ if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ {
+ enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
+ return (fcode == BUILT_IN_TM_LOAD_1
+ || fcode == BUILT_IN_TM_LOAD_2
+ || fcode == BUILT_IN_TM_LOAD_4
+ || fcode == BUILT_IN_TM_LOAD_8
+ || fcode == BUILT_IN_TM_LOAD_FLOAT
+ || fcode == BUILT_IN_TM_LOAD_DOUBLE
+ || fcode == BUILT_IN_TM_LOAD_LDOUBLE
+ || fcode == BUILT_IN_TM_LOAD_M64
+ || fcode == BUILT_IN_TM_LOAD_M128
+ || fcode == BUILT_IN_TM_LOAD_M256);
+ }
+ return false;
+}
+
+/* Return true if STMT is a TM store. */
+
+static bool
+is_tm_store (gimple stmt)
+{
+ tree fndecl;
+
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ return false;
+
+ fndecl = gimple_call_fndecl (stmt);
+ return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
+}
+
+/* Same as above, but for simple TM stores, that is, not the
+ after-write, after-read, etc. optimized variants. */
+
+static bool
+is_tm_simple_store (gimple stmt)
+{
+ tree fndecl;
+
+ if (gimple_code (stmt) != GIMPLE_CALL)
+ return false;
+
+ fndecl = gimple_call_fndecl (stmt);
+ if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ {
+ enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
+ return (fcode == BUILT_IN_TM_STORE_1
+ || fcode == BUILT_IN_TM_STORE_2
+ || fcode == BUILT_IN_TM_STORE_4
+ || fcode == BUILT_IN_TM_STORE_8
+ || fcode == BUILT_IN_TM_STORE_FLOAT
+ || fcode == BUILT_IN_TM_STORE_DOUBLE
+ || fcode == BUILT_IN_TM_STORE_LDOUBLE
+ || fcode == BUILT_IN_TM_STORE_M64
+ || fcode == BUILT_IN_TM_STORE_M128
+ || fcode == BUILT_IN_TM_STORE_M256);
+ }
+ return false;
+}
+
+/* Return true if FNDECL is BUILT_IN_TM_ABORT. */
+
+static bool
+is_tm_abort (tree fndecl)
+{
+ return (fndecl
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
+ && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
+}
+
+/* Build a GENERIC tree for a user abort. This is called by front ends
+ while transforming the __tm_abort statement. */
+
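+/* For example, an outer cancel such as the __transaction_cancel [[outer]]
+ in gcc.dg/tm/props-4.c above would be built here as a call to
+ BUILT_IN_TM_ABORT with argument AR_USERABORT | AR_OUTERABORT. */
+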
+tree
+build_tm_abort_call (location_t loc, bool is_outer)
+{
+ return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
+ build_int_cst (integer_type_node,
+ AR_USERABORT
+ | (is_outer ? AR_OUTERABORT : 0)));
+}
+
+/* Common gating function for several of the TM passes. */
+
+static bool
+gate_tm (void)
+{
+ return flag_tm;
+}
+
+/* Map for arbitrary function replacement under TM, as created
+ by the tm_wrap attribute. */
+
+static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
+ htab_t tm_wrap_map;
+
+void
+record_tm_replacement (tree from, tree to)
+{
+ struct tree_map **slot, *h;
+
+ /* Do not inline wrapper functions that will get replaced in the TM
+ pass.
+
+ Suppose you have foo() that will get replaced by tmfoo(). Make
+ sure the inliner doesn't try to outsmart us and inline foo()
+ before we get a chance to do the TM replacement. */
+ DECL_UNINLINABLE (from) = 1;
+
+ if (tm_wrap_map == NULL)
+ tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
+
+ h = ggc_alloc_tree_map ();
+ h->hash = htab_hash_pointer (from);
+ h->base.from = from;
+ h->to = to;
+
+ slot = (struct tree_map **)
+ htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
+ *slot = h;
+}
+
+/* Return a TM-aware replacement function for DECL. */
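+/* For example, a plain memset call seen in transactional code is
+ redirected to BUILT_IN_TM_MEMSET, the _ITM_memsetW entry point that
+ gcc.dg/tm/memset.c above scans for. */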
+
+static tree
+find_tm_replacement_function (tree fndecl)
+{
+ if (tm_wrap_map)
+ {
+ struct tree_map *h, in;
+
+ in.base.from = fndecl;
+ in.hash = htab_hash_pointer (fndecl);
+ h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
+ if (h)
+ return h->to;
+ }
+
+ /* ??? We may well want TM versions of most of the common <string.h>
+ functions. For now, we already have these few defined. */
+ /* Adjust expand_call_tm() attributes as necessary for the cases
+ handled here: */
+ if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_MEMCPY:
+ return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
+ case BUILT_IN_MEMMOVE:
+ return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
+ case BUILT_IN_MEMSET:
+ return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
+ default:
+ return NULL;
+ }
+
+ return NULL;
+}
+
+/* When appropriate, record TM replacement for memory allocation functions.
+
+ FROM is the FNDECL to wrap. */
+void
+tm_malloc_replacement (tree from)
+{
+ const char *str;
+ tree to;
+
+ if (TREE_CODE (from) != FUNCTION_DECL)
+ return;
+
+ /* If we have a previous replacement, the user must be explicitly
+ wrapping malloc/calloc/free. They better know what they're
+ doing... */
+ if (find_tm_replacement_function (from))
+ return;
+
+ str = IDENTIFIER_POINTER (DECL_NAME (from));
+
+ if (!strcmp (str, "malloc"))
+ to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
+ else if (!strcmp (str, "calloc"))
+ to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
+ else if (!strcmp (str, "free"))
+ to = builtin_decl_explicit (BUILT_IN_TM_FREE);
+ else
+ return;
+
+ TREE_NOTHROW (to) = 0;
+
+ record_tm_replacement (from, to);
+}
+
+/* Diagnostics for tm_safe functions/regions. Called by the front end
+ once we've lowered the function to high-gimple. */
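+
+/* For example, the checks below diagnose a call to a function that is
+ merely transaction_callable from inside a __transaction_atomic block
+ (cf. gcc.dg/tm/unsafe.c above) and any volatile access inside a
+ transaction (cf. gcc.dg/tm/pr46654.c above). */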
+
+/* Subroutine of diagnose_tm_blocks, called through walk_gimple_seq.
+ Process exactly one statement. WI->INFO is set to non-null when in
+ the context of a tm_safe function, and null for a __transaction block. */
+
+#define DIAG_TM_OUTER 1
+#define DIAG_TM_SAFE 2
+#define DIAG_TM_RELAXED 4
+
+struct diagnose_tm
+{
+ unsigned int summary_flags : 8;
+ unsigned int block_flags : 8;
+ unsigned int func_flags : 8;
+ unsigned int saw_unsafe : 1;
+ unsigned int saw_volatile : 1;
+ gimple stmt;
+};
+
+/* Tree callback function for diagnose_tm pass. */
+
+static tree
+diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
+ void *data)
+{
+ struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
+ struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
+ enum tree_code code = TREE_CODE (*tp);
+
+ if ((code == VAR_DECL
+ || code == RESULT_DECL
+ || code == PARM_DECL)
+ && d->block_flags & (DIAG_TM_SAFE | DIAG_TM_RELAXED)
+ && TREE_THIS_VOLATILE (TREE_TYPE (*tp))
+ && !d->saw_volatile)
+ {
+ d->saw_volatile = 1;
+ error_at (gimple_location (d->stmt),
+ "invalid volatile use of %qD inside transaction",
+ *tp);
+ }
+
+ return NULL_TREE;
+}
+
+static tree
+diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
+ struct walk_stmt_info *wi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
+
+ /* Save stmt for use in leaf analysis. */
+ d->stmt = stmt;
+
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_CALL:
+ {
+ tree fn = gimple_call_fn (stmt);
+
+ if ((d->summary_flags & DIAG_TM_OUTER) == 0
+ && is_tm_may_cancel_outer (fn))
+ error_at (gimple_location (stmt),
+ "%<transaction_may_cancel_outer%> function call not within"
+ " outer transaction or %<transaction_may_cancel_outer%>");
+
+ if (d->summary_flags & DIAG_TM_SAFE)
+ {
+ bool is_safe, direct_call_p;
+ tree replacement;
+
+ if (TREE_CODE (fn) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
+ {
+ direct_call_p = true;
+ replacement = TREE_OPERAND (fn, 0);
+ replacement = find_tm_replacement_function (replacement);
+ if (replacement)
+ fn = replacement;
+ }
+ else
+ {
+ direct_call_p = false;
+ replacement = NULL_TREE;
+ }
+
+ if (is_tm_safe_or_pure (fn))
+ is_safe = true;
+ else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
+ {
+ /* A function explicitly marked transaction_callable as
+ opposed to transaction_safe is being defined to be
+ unsafe as part of its ABI, regardless of its contents. */
+ is_safe = false;
+ }
+ else if (direct_call_p)
+ {
+ if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
+ is_safe = true;
+ else if (replacement)
+ {
+ /* ??? At present we've been considering replacements
+ merely transaction_callable, and therefore might
+ enter irrevocable. The tm_wrap attribute has not
+ yet made it into the new language spec. */
+ is_safe = false;
+ }
+ else
+ {
+ /* ??? Diagnostics for unmarked direct calls moved into
+ the IPA pass. Section 3.2 of the spec details how
+ functions not marked should be considered "implicitly
+ safe" based on having examined the function body. */
+ is_safe = true;
+ }
+ }
+ else
+ {
+ /* An unmarked indirect call. Consider it unsafe even
+ though optimization may yet figure out how to inline. */
+ is_safe = false;
+ }
+
+ if (!is_safe)
+ {
+ if (TREE_CODE (fn) == ADDR_EXPR)
+ fn = TREE_OPERAND (fn, 0);
+ if (d->block_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (stmt),
+ "unsafe function call %qD within "
+ "atomic transaction", fn);
+ else
+ error_at (gimple_location (stmt),
+ "unsafe function call %qD within "
+ "%<transaction_safe%> function", fn);
+ }
+ }
+ }
+ break;
+
+ case GIMPLE_ASM:
+ /* ??? We ought to come up with a way to add attributes to
+ asm statements, and then add "transaction_safe" to them.
+ Either that or get the language spec to resurrect __tm_waiver. */
+ if (d->block_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (stmt),
+ "asm not allowed in atomic transaction");
+ else if (d->func_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (stmt),
+ "asm not allowed in %<transaction_safe%> function");
+ else
+ d->saw_unsafe = true;
+ break;
+
+ case GIMPLE_TRANSACTION:
+ {
+ unsigned char inner_flags = DIAG_TM_SAFE;
+
+ if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
+ {
+ if (d->block_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (stmt),
+ "relaxed transaction in atomic transaction");
+ else if (d->func_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (stmt),
+ "relaxed transaction in %<transaction_safe%> function");
+ else
+ d->saw_unsafe = true;
+ inner_flags = DIAG_TM_RELAXED;
+ }
+ else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
+ {
+ if (d->block_flags)
+ error_at (gimple_location (stmt),
+ "outer transaction in transaction");
+ else if (d->func_flags & DIAG_TM_OUTER)
+ error_at (gimple_location (stmt),
+ "outer transaction in "
+ "%<transaction_may_cancel_outer%> function");
+ else if (d->func_flags & DIAG_TM_SAFE)
+ error_at (gimple_location (stmt),
+ "outer transaction in %<transaction_safe%> function");
+ else
+ d->saw_unsafe = true;
+ inner_flags |= DIAG_TM_OUTER;
+ }
+
+ *handled_ops_p = true;
+ if (gimple_transaction_body (stmt))
+ {
+ struct walk_stmt_info wi_inner;
+ struct diagnose_tm d_inner;
+
+ memset (&d_inner, 0, sizeof (d_inner));
+ d_inner.func_flags = d->func_flags;
+ d_inner.block_flags = d->block_flags | inner_flags;
+ d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;
+
+ memset (&wi_inner, 0, sizeof (wi_inner));
+ wi_inner.info = &d_inner;
+
+ walk_gimple_seq (gimple_transaction_body (stmt),
+ diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
+
+ d->saw_unsafe |= d_inner.saw_unsafe;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+static unsigned int
+diagnose_tm_blocks (void)
+{
+ struct walk_stmt_info wi;
+ struct diagnose_tm d;
+
+ memset (&d, 0, sizeof (d));
+ if (is_tm_may_cancel_outer (current_function_decl))
+ d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
+ else if (is_tm_safe (current_function_decl))
+ d.func_flags = DIAG_TM_SAFE;
+ d.summary_flags = d.func_flags;
+
+ memset (&wi, 0, sizeof (wi));
+ wi.info = &d;
+
+ walk_gimple_seq (gimple_body (current_function_decl),
+ diagnose_tm_1, diagnose_tm_1_op, &wi);
+
+ /* If we saw something other than a call that makes this function
+ unsafe, remember it so that the IPA pass only needs to scan calls. */
+ if (d.saw_unsafe && !is_tm_safe_or_pure (current_function_decl))
+ cgraph_local_info (current_function_decl)->tm_may_enter_irr = 1;
+
+ return 0;
+}
+
+struct gimple_opt_pass pass_diagnose_tm_blocks =
+{
+ {
+ GIMPLE_PASS,
+ "*diagnose_tm_blocks", /* name */
+ gate_tm, /* gate */
+ diagnose_tm_blocks, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_gimple_any, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+ }
+};
+
+/* Instead of instrumenting thread-private memory, we save the
+ addresses in a log, which we later use to save/restore them upon
+ transaction start/restart.
+
+ The log is keyed by address, where each element contains individual
+ statements among different code paths that perform the store.
+
+ This log is later used to generate either plain save/restore of the
+ addresses upon transaction start/restart, or calls to the ITM_L*
+ logging functions.
+
+ So for something like:
+
+ struct large { int x[1000]; };
+ struct large lala = { 0 };
+ __transaction {
+ lala.x[i] = 123;
+ ...
+ }
+
+ We can either save/restore:
+
+ lala = { 0 };
+ trxn = _ITM_startTransaction ();
+ if (trxn & a_saveLiveVariables)
+ tmp_lala1 = lala.x[i];
+ else if (a & a_restoreLiveVariables)
+ lala.x[i] = tmp_lala1;
+
+ or use the logging functions:
+
+ lala = { 0 };
+ trxn = _ITM_startTransaction ();
+ _ITM_LU4 (&lala.x[i]);
+
+ Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
+ far up the dominator tree as possible to shadow all of the writes to a
+ given location (thus reducing the total number of logging calls), but not
+ so high as to be called on a path that does not perform a
+ write. */
+
+/* One individual log entry. We may have multiple statements for the
+ same location if neither dominates the other (i.e. they are on
+ different execution paths). */
+typedef struct tm_log_entry
+{
+ /* Address to save. */
+ tree addr;
+ /* Entry block for the transaction this address occurs in. */
+ basic_block entry_block;
+ /* Dominating statements the store occurs in. */
+ gimple_vec stmts;
+ /* Initially, while we are building the log, we place a nonzero
+ value here to mean that this address *will* be saved with a
+ save/restore sequence. Later, when generating the save sequence
+ we place the SSA temp generated here. */
+ tree save_var;
+} *tm_log_entry_t;
+
+/* The actual log. */
+static htab_t tm_log;
+
+/* Addresses to log with a save/restore sequence. These should be in
+ dominator order. */
+static VEC(tree,heap) *tm_log_save_addresses;
+
+/* Map for an SSA_NAME originally pointing to a non-aliased new piece
+ of memory (malloc, alloca, etc.). */
+static htab_t tm_new_mem_hash;
+
+enum thread_memory_type
+ {
+ mem_non_local = 0,
+ mem_thread_local,
+ mem_transaction_local,
+ mem_max
+ };
+
+typedef struct tm_new_mem_map
+{
+ /* SSA_NAME being dereferenced. */
+ tree val;
+ enum thread_memory_type local_new_memory;
+} tm_new_mem_map_t;
+
+/* Htab support. Return hash value for a `tm_log_entry'. */
+static hashval_t
+tm_log_hash (const void *p)
+{
+ const struct tm_log_entry *log = (const struct tm_log_entry *) p;
+ return iterative_hash_expr (log->addr, 0);
+}
+
+/* Htab support. Return true if two log entries are the same. */
+static int
+tm_log_eq (const void *p1, const void *p2)
+{
+ const struct tm_log_entry *log1 = (const struct tm_log_entry *) p1;
+ const struct tm_log_entry *log2 = (const struct tm_log_entry *) p2;
+
+ /* FIXME:
+
+ rth: I suggest that we get rid of the component refs etc.
+ I.e. resolve the reference to base + offset.
+
+ We may need to actually finish a merge with mainline for this,
+ since we'd like to be presented with Richi's MEM_REF_EXPRs more
+ often than not. But in the meantime your tm_log_entry could save
+ the results of get_inner_reference.
+
+ See: g++.dg/tm/pr46653.C
+ */
+
+ /* Special case plain equality because operand_equal_p() below will
+ return FALSE if the addresses are equal but they have
+ side-effects (e.g. a volatile address). */
+ if (log1->addr == log2->addr)
+ return true;
+
+ return operand_equal_p (log1->addr, log2->addr, 0);
+}
+
+/* Htab support. Free one tm_log_entry. */
+static void
+tm_log_free (void *p)
+{
+ struct tm_log_entry *lp = (struct tm_log_entry *) p;
+ VEC_free (gimple, heap, lp->stmts);
+ free (lp);
+}
+
+/* Initialize logging data structures. */
+static void
+tm_log_init (void)
+{
+ tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
+ tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
+ tm_log_save_addresses = VEC_alloc (tree, heap, 5);
+}
+
+/* Free logging data structures. */
+static void
+tm_log_delete (void)
+{
+ htab_delete (tm_log);
+ htab_delete (tm_new_mem_hash);
+ VEC_free (tree, heap, tm_log_save_addresses);
+}
+
+/* Return true if MEM is a transaction invariant memory for the TM
+ region starting at REGION_ENTRY_BLOCK. */
+static bool
+transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
+{
+ if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
+ && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
+ {
+ basic_block def_bb;
+
+ def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
+ return def_bb != region_entry_block
+ && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
+ }
+
+ mem = strip_invariant_refs (mem);
+ return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
+}
+
+/* Given an address ADDR in STMT, find it in the memory log or add it,
+ making sure to keep only the addresses highest in the dominator
+ tree.
+
+ ENTRY_BLOCK is the entry_block for the transaction.
+
+ If we find the address in the log, make sure it's either the same
+ address, or an equivalent one that dominates ADDR.
+
+ If we find the address, but neither ADDR dominates the found
+ address, nor the found one dominates ADDR, we're on different
+ execution paths. Add it.
+
+ If known, ENTRY_BLOCK is the entry block for the region, otherwise
+ NULL. */
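+/* For example, if two stores to the same address sit on the two arms of
+ an if/else, neither dominates the other, so both statements are kept in
+ the entry's STMTS vector; conversely, an existing store that dominates a
+ later one makes the later store redundant and it is not added. */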
+static void
+tm_log_add (basic_block entry_block, tree addr, gimple stmt)
+{
+ void **slot;
+ struct tm_log_entry l, *lp;
+
+ l.addr = addr;
+ slot = htab_find_slot (tm_log, &l, INSERT);
+ if (!*slot)
+ {
+ tree type = TREE_TYPE (addr);
+
+ lp = XNEW (struct tm_log_entry);
+ lp->addr = addr;
+ *slot = lp;
+
+ /* Small invariant addresses can be handled as save/restores. */
+ if (entry_block
+ && transaction_invariant_address_p (lp->addr, entry_block)
+ && TYPE_SIZE_UNIT (type) != NULL
+ && host_integerp (TYPE_SIZE_UNIT (type), 1)
+ && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
+ < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
+ /* We must be able to copy this type normally. I.e., no
+ special constructors and the like. */
+ && !TREE_ADDRESSABLE (type))
+ {
+ lp->save_var = create_tmp_var (TREE_TYPE (lp->addr), "tm_save");
+ add_referenced_var (lp->save_var);
+ lp->stmts = NULL;
+ lp->entry_block = entry_block;
+ /* Save addresses separately in dominator order so we don't
+ get confused by overlapping addresses in the save/restore
+ sequence. */
+ VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
+ }
+ else
+ {
+ /* Use the logging functions. */
+ lp->stmts = VEC_alloc (gimple, heap, 5);
+ VEC_quick_push (gimple, lp->stmts, stmt);
+ lp->save_var = NULL;
+ }
+ }
+ else
+ {
+ size_t i;
+ gimple oldstmt;
+
+ lp = (struct tm_log_entry *) *slot;
+
+ /* If we're generating a save/restore sequence, we don't care
+ about statements. */
+ if (lp->save_var)
+ return;
+
+ for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
+ {
+ if (stmt == oldstmt)
+ return;
+ /* We already have a store to the same address, higher up the
+ dominator tree. Nothing to do. */
+ if (dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (stmt), gimple_bb (oldstmt)))
+ return;
+ /* We should be processing blocks in dominator tree order. */
+ gcc_assert (!dominated_by_p (CDI_DOMINATORS,
+ gimple_bb (oldstmt), gimple_bb (stmt)));
+ }
+ /* Store is on a different code path. */
+ VEC_safe_push (gimple, heap, lp->stmts, stmt);
+ }
+}
+
+/* Gimplify the address of a TARGET_MEM_REF. Return the SSA_NAME
+ result, inserting any new statements before GSI. */
+
+static tree
+gimplify_addr (gimple_stmt_iterator *gsi, tree x)
+{
+ if (TREE_CODE (x) == TARGET_MEM_REF)
+ x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
+ else
+ x = build_fold_addr_expr (x);
+ return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
+}
+
+/* Instrument one address with the logging functions.
+ ADDR is the address to save.
+ STMT is the statement before which to place it. */
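+/* For example, a 4-byte scalar is logged with BUILT_IN_TM_LOG_4 (the
+ _ITM_LU4 call shown in the overview above), while a 16-byte vector
+ uses BUILT_IN_TM_LOG_M128 when the target provides it, as checked by
+ gcc.dg/tm/memopt-15.c above via _ITM_LM128. */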
+static void
+tm_log_emit_stmt (tree addr, gimple stmt)
+{
+ tree type = TREE_TYPE (addr);
+ tree size = TYPE_SIZE_UNIT (type);
+ gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+ gimple log;
+ enum built_in_function code = BUILT_IN_TM_LOG;
+
+ if (type == float_type_node)
+ code = BUILT_IN_TM_LOG_FLOAT;
+ else if (type == double_type_node)
+ code = BUILT_IN_TM_LOG_DOUBLE;
+ else if (type == long_double_type_node)
+ code = BUILT_IN_TM_LOG_LDOUBLE;
+ else if (host_integerp (size, 1))
+ {
+ unsigned int n = tree_low_cst (size, 1);
+ switch (n)
+ {
+ case 1:
+ code = BUILT_IN_TM_LOG_1;
+ break;
+ case 2:
+ code = BUILT_IN_TM_LOG_2;
+ break;
+ case 4:
+ code = BUILT_IN_TM_LOG_4;
+ break;
+ case 8:
+ code = BUILT_IN_TM_LOG_8;
+ break;
+ default:
+ code = BUILT_IN_TM_LOG;
+ if (TREE_CODE (type) == VECTOR_TYPE)
+ {
+ if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
+ code = BUILT_IN_TM_LOG_M64;
+ else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
+ code = BUILT_IN_TM_LOG_M128;
+ else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
+ code = BUILT_IN_TM_LOG_M256;
+ }
+ break;
+ }
+ }
+
+ addr = gimplify_addr (&gsi, addr);
+ if (code == BUILT_IN_TM_LOG)
+ log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
+ else
+ log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
+ gsi_insert_before (&gsi, log, GSI_SAME_STMT);
+}
+
+/* Go through the log and emit logging calls for the addresses that must
+ be instrumented with the logging functions. Leave the save/restore
+ addresses for later. */
+static void
+tm_log_emit (void)
+{
+ htab_iterator hi;
+ struct tm_log_entry *lp;
+
+ FOR_EACH_HTAB_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
+ {
+ size_t i;
+ gimple stmt;
+
+ if (dump_file)
+ {
+ fprintf (dump_file, "TM thread private mem logging: ");
+ print_generic_expr (dump_file, lp->addr, 0);
+ fprintf (dump_file, "\n");
+ }
+
+ if (lp->save_var)
+ {
+ if (dump_file)
+ fprintf (dump_file, "DUMPING to variable\n");
+ continue;
+ }
+ else
+ {
+ if (dump_file)
+ fprintf (dump_file, "DUMPING with logging functions\n");
+ for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
+ tm_log_emit_stmt (lp->addr, stmt);
+ }
+ }
+}
+
+/* Emit the save sequence for the corresponding addresses in the log.
+ ENTRY_BLOCK is the entry block for the transaction.
+ BB is the basic block to insert the code in. */
+static void
+tm_log_emit_saves (basic_block entry_block, basic_block bb)
+{
+ size_t i;
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ gimple stmt;
+ struct tm_log_entry l, *lp;
+
+ for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
+ {
+ l.addr = VEC_index (tree, tm_log_save_addresses, i);
+ lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
+ gcc_assert (lp->save_var != NULL);
+
+ /* We only care about variables in the current transaction. */
+ if (lp->entry_block != entry_block)
+ continue;
+
+ stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
+
+ /* Make sure we can create an SSA_NAME for this type. For
+ instance, aggregates aren't allowed, in which case the system
+ will create a VOP for us and everything will just work. */
+ if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
+ {
+ lp->save_var = make_ssa_name (lp->save_var, stmt);
+ gimple_assign_set_lhs (stmt, lp->save_var);
+ }
+
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
+ }
+}
+
+/* Emit the restore sequence for the corresponding addresses in the log.
+ ENTRY_BLOCK is the entry block for the transaction.
+ BB is the basic block to insert the code in. */
+static void
+tm_log_emit_restores (basic_block entry_block, basic_block bb)
+{
+ int i;
+ struct tm_log_entry l, *lp;
+ gimple_stmt_iterator gsi;
+ gimple stmt;
+
+ for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
+ {
+ l.addr = VEC_index (tree, tm_log_save_addresses, i);
+ lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
+ gcc_assert (lp->save_var != NULL);
+
+ /* We only care about variables in the current transaction. */
+ if (lp->entry_block != entry_block)
+ continue;
+
+ /* Restores are in LIFO order from the saves in case we have
+ overlaps. */
+ gsi = gsi_start_bb (bb);
+
+ stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+ }
+}
+
+/* Emit the checks for performing either a save or a restore sequence.
+
+ TRXN_PROP is either A_SAVELIVEVARIABLES or A_RESTORELIVEVARIABLES.
+
+ The code sequence is inserted in a new basic block created in
+ END_BB which is inserted between BEFORE_BB and the destination of
+ FALLTHRU_EDGE.
+
+ STATUS is the return value from _ITM_beginTransaction.
+ ENTRY_BLOCK is the entry block for the transaction.
+ EMITF is a callback to emit the actual save/restore code.
+
+ The basic block containing the conditional checking for TRXN_PROP
+ is returned. */
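+/* Illustratively, for TRXN_PROP == A_SAVELIVEVARIABLES the blocks built
+ below contain roughly the following (temporaries are made up):
+
+ cond_bb:
+ t1 = status & A_SAVELIVEVARIABLES;
+ if (t1 != 0) goto code_bb; else goto end_bb;
+ code_bb:
+ <save statements emitted by EMITF>
+ goto end_bb; */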
+static basic_block
+tm_log_emit_save_or_restores (basic_block entry_block,
+ unsigned trxn_prop,
+ tree status,
+ void (*emitf)(basic_block, basic_block),
+ basic_block before_bb,
+ edge fallthru_edge,
+ basic_block *end_bb)
+{
+ basic_block cond_bb, code_bb;
+ gimple cond_stmt, stmt;
+ gimple_stmt_iterator gsi;
+ tree t1, t2;
+ int old_flags = fallthru_edge->flags;
+
+ cond_bb = create_empty_bb (before_bb);
+ code_bb = create_empty_bb (cond_bb);
+ *end_bb = create_empty_bb (code_bb);
+ redirect_edge_pred (fallthru_edge, *end_bb);
+ fallthru_edge->flags = EDGE_FALLTHRU;
+ make_edge (before_bb, cond_bb, old_flags);
+
+ set_immediate_dominator (CDI_DOMINATORS, cond_bb, before_bb);
+ set_immediate_dominator (CDI_DOMINATORS, code_bb, cond_bb);
+
+ gsi = gsi_last_bb (cond_bb);
+
+ /* t1 = status & A_{property}. */
+ t1 = make_rename_temp (TREE_TYPE (status), NULL);
+ t2 = build_int_cst (TREE_TYPE (status), trxn_prop);
+ stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ /* if (t1). */
+ t2 = build_int_cst (TREE_TYPE (status), 0);
+ cond_stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
+ gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
+
+ emitf (entry_block, code_bb);
+
+ make_edge (cond_bb, code_bb, EDGE_TRUE_VALUE);
+ make_edge (cond_bb, *end_bb, EDGE_FALSE_VALUE);
+ make_edge (code_bb, *end_bb, EDGE_FALLTHRU);
+
+ return cond_bb;
+}
+
+static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
+ struct walk_stmt_info *);
+static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
+ struct walk_stmt_info *);
+
+/* Evaluate an address X being dereferenced and determine if it
+ originally points to a non-aliased new chunk of memory (malloc,
+ alloca, etc.).
+
+ Return MEM_THREAD_LOCAL if it points to a thread-local address.
+ Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
+ Return MEM_NON_LOCAL otherwise.
+
+ ENTRY_BLOCK is the entry block to the transaction containing the
+ dereference of X. */
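+/* For example (cf. gcc.dg/tm/memopt-8.c, memopt-9.c and memopt-15.c
+ above): memory malloc'd inside the transaction is transaction-local
+ and needs no barrier; memory malloc'd before the transaction is merely
+ thread-local and is handled with the logging functions; and a pointer
+ that escapes is mem_non_local and gets full instrumentation. */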
+static enum thread_memory_type
+thread_private_new_memory (basic_block entry_block, tree x)
+{
+ gimple stmt = NULL;
+ enum tree_code code;
+ void **slot;
+ tm_new_mem_map_t elt, *elt_p;
+ tree val = x;
+ enum thread_memory_type retval = mem_transaction_local;
+
+ if (!entry_block
+ || TREE_CODE (x) != SSA_NAME
+ /* Possible uninitialized use, or a function argument. In
+ either case, we don't care. */
+ || SSA_NAME_IS_DEFAULT_DEF (x))
+ return mem_non_local;
+
+ /* Look in cache first. */
+ elt.val = x;
+ slot = htab_find_slot (tm_new_mem_hash, &elt, INSERT);
+ elt_p = (tm_new_mem_map_t *) *slot;
+ if (elt_p)
+ return elt_p->local_new_memory;
+
+ /* Optimistically assume the memory is transaction local during
+ processing. This catches recursion into this variable. */
+ *slot = elt_p = XNEW (tm_new_mem_map_t);
+ elt_p->val = val;
+ elt_p->local_new_memory = mem_transaction_local;
+
+ /* Search DEF chain to find the original definition of this address. */
+ do
+ {
+ if (ptr_deref_may_alias_global_p (x))
+ {
+ /* Address escapes. This is not thread-private. */
+ retval = mem_non_local;
+ goto new_memory_ret;
+ }
+
+ stmt = SSA_NAME_DEF_STMT (x);
+
+ /* If the malloc call is outside the transaction, this is
+ thread-local. */
+ if (retval != mem_thread_local
+ && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
+ retval = mem_thread_local;
+
+ if (is_gimple_assign (stmt))
+ {
+ code = gimple_assign_rhs_code (stmt);
+ /* x = foo ==> foo */
+ if (code == SSA_NAME)
+ x = gimple_assign_rhs1 (stmt);
+ /* x = foo + n ==> foo */
+ else if (code == POINTER_PLUS_EXPR)
+ x = gimple_assign_rhs1 (stmt);
+ /* x = (cast*) foo ==> foo */
+ else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
+ x = gimple_assign_rhs1 (stmt);
+ else
+ {
+ retval = mem_non_local;
+ goto new_memory_ret;
+ }
+ }
+ else
+ {
+ if (gimple_code (stmt) == GIMPLE_PHI)
+ {
+ unsigned int i;
+ enum thread_memory_type mem;
+ tree phi_result = gimple_phi_result (stmt);
+
+ /* If any of the ancestors are non-local, we are sure to
+ be non-local. Otherwise we can avoid doing anything
+ and inherit what has already been generated. */
+ retval = mem_max;
+ for (i = 0; i < gimple_phi_num_args (stmt); ++i)
+ {
+ tree op = PHI_ARG_DEF (stmt, i);
+
+ /* Exclude self-assignment. */
+ if (phi_result == op)
+ continue;
+
+ mem = thread_private_new_memory (entry_block, op);
+ if (mem == mem_non_local)
+ {
+ retval = mem;
+ goto new_memory_ret;
+ }
+ retval = MIN (retval, mem);
+ }
+ goto new_memory_ret;
+ }
+ break;
+ }
+ }
+ while (TREE_CODE (x) == SSA_NAME);
+
+ if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
+ /* Thread-local or transaction-local. */
+ ;
+ else
+ retval = mem_non_local;
+
+ new_memory_ret:
+ elt_p->local_new_memory = retval;
+ return retval;
+}
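+
+/* For example, for
+ __transaction_atomic { p = malloc (n); *p = 1; }
+ the memory pointed to by P is transaction-local (a restart frees it
+ and the malloc runs again), so the store needs no barrier and no
+ logging. If the malloc instead dominates the transaction entry, the
+ memory is merely thread-local: still no barrier, but requires_barrier
+ below logs the address so it can be saved and restored on restart. */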
+
+/* Determine whether X has to be instrumented using a read
+ or write barrier.
+
+ ENTRY_BLOCK is the entry block of the region in which STMT
+ resides. NULL if unknown.
+
+ STMT is the statement in which X occurs. It is used for thread
+ private memory instrumentation. If no TPM instrumentation is
+ desired, STMT should be null. */
+static bool
+requires_barrier (basic_block entry_block, tree x, gimple stmt)
+{
+ tree orig = x;
+ while (handled_component_p (x))
+ x = TREE_OPERAND (x, 0);
+
+ switch (TREE_CODE (x))
+ {
+ case INDIRECT_REF:
+ case MEM_REF:
+ {
+ enum thread_memory_type ret;
+
+ ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
+ if (ret == mem_non_local)
+ return true;
+ if (stmt && ret == mem_thread_local)
+ /* ?? Should we pass `orig', or the INDIRECT_REF X. ?? */
+ tm_log_add (entry_block, orig, stmt);
+
+ /* Transaction-locals require nothing at all. For malloc, a
+ transaction restart frees the memory and we reallocate.
+ For alloca, the stack pointer gets reset by the retry and
+ we reallocate. */
+ return false;
+ }
+
+ case TARGET_MEM_REF:
+ if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
+ return true;
+ x = TREE_OPERAND (TMR_BASE (x), 0);
+ if (TREE_CODE (x) == PARM_DECL)
+ return false;
+ gcc_assert (TREE_CODE (x) == VAR_DECL);
+ /* FALLTHRU */
+
+ case PARM_DECL:
+ case RESULT_DECL:
+ case VAR_DECL:
+ if (DECL_BY_REFERENCE (x))
+ {
+ /* ??? This value is a pointer, but aggregate_value_p has been
+ jigged to return true which confuses needs_to_live_in_memory.
+ This ought to be cleaned up generically.
+
+ FIXME: Verify this still happens after the next mainline
+ merge. Testcase is g++.dg/tm/pr47554.C.
+ */
+ return false;
+ }
+
+ if (is_global_var (x))
+ return !TREE_READONLY (x);
+ if (/* FIXME: This condition should actually go below in the
+ tm_log_add() call, however is_call_clobbered() depends on
+ aliasing info which is not available during
+ gimplification. Since requires_barrier() gets called
+ during lower_sequence_tm/gimplification, leave the call
+ to needs_to_live_in_memory until we eliminate
+ lower_sequence_tm altogether. */
+ needs_to_live_in_memory (x)
+ /* X escapes. */
+ || ptr_deref_may_alias_global_p (x))
+ return true;
+ else
+ {
+ /* For local memory that doesn't escape (aka thread private
+ memory), we can either save the value at the beginning of
+ the transaction and restore on restart, or call a tm
+ function to dynamically save and restore on restart
+ (ITM_L*). */
+ if (stmt)
+ tm_log_add (entry_block, orig, stmt);
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
+ a transaction region. */
+
+static void
+examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+
+ if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
+ *state |= GTMA_HAVE_LOAD;
+ if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
+ *state |= GTMA_HAVE_STORE;
+}
+
+/* Mark a GIMPLE_CALL as appropriate for being inside a transaction. */
+
+static void
+examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ tree fn;
+
+ if (is_tm_pure_call (stmt))
+ return;
+
+ /* Check if this call is a transaction abort. */
+ fn = gimple_call_fndecl (stmt);
+ if (is_tm_abort (fn))
+ *state |= GTMA_HAVE_ABORT;
+
+ /* Note that something may happen. */
+ *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
+}
+
+/* Lower a GIMPLE_TRANSACTION statement. */
+
+static void
+lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
+{
+ gimple g, stmt = gsi_stmt (*gsi);
+ unsigned int *outer_state = (unsigned int *) wi->info;
+ unsigned int this_state = 0;
+ struct walk_stmt_info this_wi;
+
+ /* First, lower the body. The scanning that we do inside gives
+ us some idea of what we're dealing with. */
+ memset (&this_wi, 0, sizeof (this_wi));
+ this_wi.info = (void *) &this_state;
+ walk_gimple_seq (gimple_transaction_body (stmt),
+ lower_sequence_tm, NULL, &this_wi);
+
+ /* If there was absolutely nothing transaction-related inside the
+ transaction, we may elide it. Likewise if this is a nested
+ transaction and does not contain an abort. */
+ if (this_state == 0
+ || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
+ {
+ if (outer_state)
+ *outer_state |= this_state;
+
+ gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
+ GSI_SAME_STMT);
+ gimple_transaction_set_body (stmt, NULL);
+
+ gsi_remove (gsi, true);
+ wi->removed_stmt = true;
+ return;
+ }
+
+ /* Wrap the body of the transaction in a try-finally node so that
+ the commit call is always properly called. */
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
+ if (flag_exceptions)
+ {
+ tree ptr;
+ gimple_seq n_seq, e_seq;
+
+ n_seq = gimple_seq_alloc_with_stmt (g);
+ e_seq = gimple_seq_alloc ();
+
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
+ 1, integer_zero_node);
+ ptr = create_tmp_var (ptr_type_node, NULL);
+ gimple_call_set_lhs (g, ptr);
+ gimple_seq_add_stmt (&e_seq, g);
+
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
+ 1, ptr);
+ gimple_seq_add_stmt (&e_seq, g);
+
+ g = gimple_build_eh_else (n_seq, e_seq);
+ }
+
+ g = gimple_build_try (gimple_transaction_body (stmt),
+ gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
+ gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);
+
+ gimple_transaction_set_body (stmt, NULL);
+
+ /* If the transaction calls abort or if this is an outer transaction,
+ add an "over" label afterwards. */
+ if ((this_state & (GTMA_HAVE_ABORT))
+ || (gimple_transaction_subcode(stmt) & GTMA_IS_OUTER))
+ {
+ tree label = create_artificial_label (UNKNOWN_LOCATION);
+ gimple_transaction_set_label (stmt, label);
+ gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
+ }
+
+ /* Record the set of operations found for use later. */
+ this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
+ gimple_transaction_set_subcode (stmt, this_state);
+}
+
+/* Iterate through the statements in the sequence, lowering them all
+ as appropriate for being in a transaction. */
+
+static tree
+lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
+ struct walk_stmt_info *wi)
+{
+ unsigned int *state = (unsigned int *) wi->info;
+ gimple stmt = gsi_stmt (*gsi);
+
+ *handled_ops_p = true;
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
+ /* Only memory reads/writes need to be instrumented. */
+ if (gimple_assign_single_p (stmt))
+ examine_assign_tm (state, gsi);
+ break;
+
+ case GIMPLE_CALL:
+ examine_call_tm (state, gsi);
+ break;
+
+ case GIMPLE_ASM:
+ *state |= GTMA_MAY_ENTER_IRREVOCABLE;
+ break;
+
+ case GIMPLE_TRANSACTION:
+ lower_transaction (gsi, wi);
+ break;
+
+ default:
+ *handled_ops_p = !gimple_has_substatements (stmt);
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+/* Iterate through the statements in the sequence, lowering them all
+ as appropriate for being outside of a transaction. */
+
+static tree
+lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
+ struct walk_stmt_info * wi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+
+ if (gimple_code (stmt) == GIMPLE_TRANSACTION)
+ {
+ *handled_ops_p = true;
+ lower_transaction (gsi, wi);
+ }
+ else
+ *handled_ops_p = !gimple_has_substatements (stmt);
+
+ return NULL_TREE;
+}
+
+/* Main entry point for flattening GIMPLE_TRANSACTION constructs. After
+ this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
+ been moved out, and all the data required for constructing a proper
+ CFG has been recorded. */
+
+static unsigned int
+execute_lower_tm (void)
+{
+ struct walk_stmt_info wi;
+
+ /* Transactional clones aren't created until a later pass. */
+ gcc_assert (!decl_is_tm_clone (current_function_decl));
+
+ memset (&wi, 0, sizeof (wi));
+ walk_gimple_seq (gimple_body (current_function_decl),
+ lower_sequence_no_tm, NULL, &wi);
+
+ return 0;
+}
+
+struct gimple_opt_pass pass_lower_tm =
+{
+ {
+ GIMPLE_PASS,
+ "tmlower", /* name */
+ gate_tm, /* gate */
+ execute_lower_tm, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_gimple_lcf, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func /* todo_flags_finish */
+ }
+};
+
+/* Collect region information for each transaction. */
+
+struct tm_region
+{
+ /* Link to the next unnested transaction. */
+ struct tm_region *next;
+
+ /* Link to the next inner transaction. */
+ struct tm_region *inner;
+
+ /* Link to the next outer transaction. */
+ struct tm_region *outer;
+
+ /* The GIMPLE_TRANSACTION statement beginning this transaction. */
+ gimple transaction_stmt;
+
+ /* The entry block to this region. */
+ basic_block entry_block;
+
+ /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
+ These blocks are still a part of the region (i.e., the border is
+ inclusive). Note that this set is only complete for paths in the CFG
+ starting at ENTRY_BLOCK, and that there is no exit block recorded for
+ the edge to the "over" label. */
+ bitmap exit_blocks;
+
+ /* The set of all blocks that have a TM_IRREVOCABLE call. */
+ bitmap irr_blocks;
+};
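+
+/* As an illustration of the links above, for
+ __transaction_atomic { A; __transaction_atomic { B; } }
+ tm_region_init below creates one top-level region (reachable from
+ ALL_TM_REGIONS via NEXT) whose INNER field points to the region for
+ the nested transaction; the nested region's OUTER field points back
+ to the enclosing one, and siblings at the same nesting level are
+ chained through their NEXT fields. */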
+
+/* True if there are pending edge statements to be committed for the
+ current function being scanned in the tmmark pass. */
+bool pending_edge_inserts_p;
+
+static struct tm_region *all_tm_regions;
+static bitmap_obstack tm_obstack;
+
+
+/* A subroutine of tm_region_init. Record the existence of the
+ GIMPLE_TRANSACTION statement in a tree of tm_region elements. */
+
+static struct tm_region *
+tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
+{
+ struct tm_region *region;
+
+ region = (struct tm_region *)
+ obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
+
+ if (outer)
+ {
+ region->next = outer->inner;
+ outer->inner = region;
+ }
+ else
+ {
+ region->next = all_tm_regions;
+ all_tm_regions = region;
+ }
+ region->inner = NULL;
+ region->outer = outer;
+
+ region->transaction_stmt = stmt;
+
+ /* There are either one or two edges out of the block containing
+ the GIMPLE_TRANSACTION, one to the actual region and one to the
+ "over" label if the region contains an abort. The former will
+ always be the one marked FALLTHRU. */
+ region->entry_block = FALLTHRU_EDGE (bb)->dest;
+
+ region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
+ region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
+
+ return region;
+}
+
+/* A subroutine of tm_region_init. Record all the exit and
+ irrevocable blocks in BB into the region's exit_blocks and
+ irr_blocks bitmaps. Returns the new region being scanned. */
+
+static struct tm_region *
+tm_region_init_1 (struct tm_region *region, basic_block bb)
+{
+ gimple_stmt_iterator gsi;
+ gimple g;
+
+ if (!region
+ || (!region->irr_blocks && !region->exit_blocks))
+ return region;
+
+ /* Check whether this is the end of a region by seeing if it
+ contains a call to __builtin_tm_commit{,_eh}. Note that the
+ outermost region for DECL_IS_TM_CLONE need not collect this. */
+ for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ g = gsi_stmt (gsi);
+ if (gimple_code (g) == GIMPLE_CALL)
+ {
+ tree fn = gimple_call_fndecl (g);
+ if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
+ {
+ if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
+ || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
+ && region->exit_blocks)
+ {
+ bitmap_set_bit (region->exit_blocks, bb->index);
+ region = region->outer;
+ break;
+ }
+ if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
+ bitmap_set_bit (region->irr_blocks, bb->index);
+ }
+ }
+ }
+ return region;
+}
+
+/* Collect all of the transaction regions within the current function
+ and record them in ALL_TM_REGIONS. The REGION parameter may specify
+ an "outermost" region for use by tm clones. */
+
+static void
+tm_region_init (struct tm_region *region)
+{
+ gimple g;
+ edge_iterator ei;
+ edge e;
+ basic_block bb;
+ VEC(basic_block, heap) *queue = NULL;
+ bitmap visited_blocks = BITMAP_ALLOC (NULL);
+ struct tm_region *old_region;
+
+ all_tm_regions = region;
+ bb = single_succ (ENTRY_BLOCK_PTR);
+
+ VEC_safe_push (basic_block, heap, queue, bb);
+ gcc_assert (!bb->aux); /* FIXME: Remove me. */
+ bb->aux = region;
+ do
+ {
+ bb = VEC_pop (basic_block, queue);
+ region = (struct tm_region *)bb->aux;
+ bb->aux = NULL;
+
+ /* Record exit and irrevocable blocks. */
+ region = tm_region_init_1 (region, bb);
+
+ /* Check for the last statement in the block beginning a new region. */
+ g = last_stmt (bb);
+ old_region = region;
+ if (g && gimple_code (g) == GIMPLE_TRANSACTION)
+ region = tm_region_init_0 (region, bb, g);
+
+ /* Process subsequent blocks. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!bitmap_bit_p (visited_blocks, e->dest->index))
+ {
+ bitmap_set_bit (visited_blocks, e->dest->index);
+ VEC_safe_push (basic_block, heap, queue, e->dest);
+ gcc_assert (!e->dest->aux); /* FIXME: Remove me. */
+
+ /* If the current block started a new region, make sure that only
+ the entry block of the new region is associated with this region.
+ Other successors are still part of the old region. */
+ if (old_region != region && e->dest != region->entry_block)
+ e->dest->aux = old_region;
+ else
+ e->dest->aux = region;
+ }
+ }
+ while (!VEC_empty (basic_block, queue));
+ VEC_free (basic_block, heap, queue);
+ BITMAP_FREE (visited_blocks);
+}
+
+/* The "gate" function for all transactional memory expansion and optimization
+ passes. We collect region information for each top-level transaction, and
+ if we don't find any, we skip all of the TM passes. Each region will have
+ all of the exit blocks recorded, and the originating statement. */
+
+static bool
+gate_tm_init (void)
+{
+ if (!flag_tm)
+ return false;
+
+ calculate_dominance_info (CDI_DOMINATORS);
+ bitmap_obstack_initialize (&tm_obstack);
+
+ /* If the function is a TM_CLONE, then the entire function is the region. */
+ if (decl_is_tm_clone (current_function_decl))
+ {
+ struct tm_region *region = (struct tm_region *)
+ obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
+ memset (region, 0, sizeof (*region));
+ region->entry_block = single_succ (ENTRY_BLOCK_PTR);
+ /* For a clone, the entire function is the region. But even if
+ we don't need to record any exit blocks, we may need to
+ record irrevocable blocks. */
+ region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
+
+ tm_region_init (region);
+ }
+ else
+ {
+ tm_region_init (NULL);
+
+ /* If we didn't find any regions, cleanup and skip the whole tree
+ of tm-related optimizations. */
+ if (all_tm_regions == NULL)
+ {
+ bitmap_obstack_release (&tm_obstack);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+struct gimple_opt_pass pass_tm_init =
+{
+ {
+ GIMPLE_PASS,
+ "*tminit", /* name */
+ gate_tm_init, /* gate */
+ NULL, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_ssa | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+ }
+};
+
+/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
+ represented by STATE. */
+
+static inline void
+transaction_subcode_ior (struct tm_region *region, unsigned flags)
+{
+ if (region && region->transaction_stmt)
+ {
+ flags |= gimple_transaction_subcode (region->transaction_stmt);
+ gimple_transaction_set_subcode (region->transaction_stmt, flags);
+ }
+}
+
+/* Construct a memory load in a transactional context. Return the
+ gimple statement performing the load, or NULL if there is no
+ TM_LOAD builtin of the appropriate size to do the load.
+
+ LOC is the location to use for the new statement(s). */
+
+static gimple
+build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
+{
+ enum built_in_function code = END_BUILTINS;
+ tree t, type = TREE_TYPE (rhs), decl;
+ gimple gcall;
+
+ if (type == float_type_node)
+ code = BUILT_IN_TM_LOAD_FLOAT;
+ else if (type == double_type_node)
+ code = BUILT_IN_TM_LOAD_DOUBLE;
+ else if (type == long_double_type_node)
+ code = BUILT_IN_TM_LOAD_LDOUBLE;
+ else if (TYPE_SIZE_UNIT (type) != NULL
+ && host_integerp (TYPE_SIZE_UNIT (type), 1))
+ {
+ switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
+ {
+ case 1:
+ code = BUILT_IN_TM_LOAD_1;
+ break;
+ case 2:
+ code = BUILT_IN_TM_LOAD_2;
+ break;
+ case 4:
+ code = BUILT_IN_TM_LOAD_4;
+ break;
+ case 8:
+ code = BUILT_IN_TM_LOAD_8;
+ break;
+ }
+ }
+
+ if (code == END_BUILTINS)
+ {
+ decl = targetm.vectorize.builtin_tm_load (type);
+ if (!decl)
+ return NULL;
+ }
+ else
+ decl = builtin_decl_explicit (code);
+
+ t = gimplify_addr (gsi, rhs);
+ gcall = gimple_build_call (decl, 1, t);
+ gimple_set_location (gcall, loc);
+
+ t = TREE_TYPE (TREE_TYPE (decl));
+ if (useless_type_conversion_p (type, t))
+ {
+ gimple_call_set_lhs (gcall, lhs);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+ }
+ else
+ {
+ gimple g;
+ tree temp;
+
+ temp = make_rename_temp (t, NULL);
+ gimple_call_set_lhs (gcall, temp);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+
+ t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
+ g = gimple_build_assign (lhs, t);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ }
+
+ return gcall;
+}
+
+
+/* Similarly for storing TYPE in a transactional context. */
+
+static gimple
+build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
+{
+ enum built_in_function code = END_BUILTINS;
+ tree t, fn, type = TREE_TYPE (rhs), simple_type;
+ gimple gcall;
+
+ if (type == float_type_node)
+ code = BUILT_IN_TM_STORE_FLOAT;
+ else if (type == double_type_node)
+ code = BUILT_IN_TM_STORE_DOUBLE;
+ else if (type == long_double_type_node)
+ code = BUILT_IN_TM_STORE_LDOUBLE;
+ else if (TYPE_SIZE_UNIT (type) != NULL
+ && host_integerp (TYPE_SIZE_UNIT (type), 1))
+ {
+ switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
+ {
+ case 1:
+ code = BUILT_IN_TM_STORE_1;
+ break;
+ case 2:
+ code = BUILT_IN_TM_STORE_2;
+ break;
+ case 4:
+ code = BUILT_IN_TM_STORE_4;
+ break;
+ case 8:
+ code = BUILT_IN_TM_STORE_8;
+ break;
+ }
+ }
+
+ if (code == END_BUILTINS)
+ {
+ fn = targetm.vectorize.builtin_tm_store (type);
+ if (!fn)
+ return NULL;
+ }
+ else
+ fn = builtin_decl_explicit (code);
+
+ simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));
+
+ if (TREE_CODE (rhs) == CONSTRUCTOR)
+ {
+ /* Handle the easy initialization to zero. */
+ if (CONSTRUCTOR_ELTS (rhs) == 0)
+ rhs = build_int_cst (simple_type, 0);
+ else
+ {
+ /* ...otherwise punt to the caller and probably use
+ BUILT_IN_TM_MEMMOVE, because we can't wrap a
+ VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
+ valid gimple. */
+ return NULL;
+ }
+ }
+ else if (!useless_type_conversion_p (simple_type, type))
+ {
+ gimple g;
+ tree temp;
+
+ temp = make_rename_temp (simple_type, NULL);
+ t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
+ g = gimple_build_assign (temp, t);
+ gimple_set_location (g, loc);
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+
+ rhs = temp;
+ }
+
+ t = gimplify_addr (gsi, lhs);
+ gcall = gimple_build_call (fn, 2, t, rhs);
+ gimple_set_location (gcall, loc);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+
+ return gcall;
+}
+
+
+/* Expand an assignment statement into transactional builtins. */
+
+static void
+expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ location_t loc = gimple_location (stmt);
+ tree lhs = gimple_assign_lhs (stmt);
+ tree rhs = gimple_assign_rhs1 (stmt);
+ bool store_p = requires_barrier (region->entry_block, lhs, NULL);
+ bool load_p = requires_barrier (region->entry_block, rhs, NULL);
+ gimple gcall = NULL;
+
+ if (!load_p && !store_p)
+ {
+ /* Add thread private addresses to log if applicable. */
+ requires_barrier (region->entry_block, lhs, stmt);
+ gsi_next (gsi);
+ return;
+ }
+
+ gsi_remove (gsi, true);
+
+ if (load_p && !store_p)
+ {
+ transaction_subcode_ior (region, GTMA_HAVE_LOAD);
+ gcall = build_tm_load (loc, lhs, rhs, gsi);
+ }
+ else if (store_p && !load_p)
+ {
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+ gcall = build_tm_store (loc, lhs, rhs, gsi);
+ }
+ if (!gcall)
+ {
+ tree lhs_addr, rhs_addr;
+
+ if (load_p)
+ transaction_subcode_ior (region, GTMA_HAVE_LOAD);
+ if (store_p)
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+
+ /* ??? Figure out if there's any possible overlap between the LHS
+ and the RHS and if not, use MEMCPY. */
+ lhs_addr = gimplify_addr (gsi, lhs);
+ rhs_addr = gimplify_addr (gsi, rhs);
+ gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
+ 3, lhs_addr, rhs_addr,
+ TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
+ gimple_set_location (gcall, loc);
+ gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
+ }
+
+ /* Now that we have the load/store in its instrumented form, add
+ thread private addresses to the log if applicable. */
+ if (!store_p)
+ requires_barrier (region->entry_block, lhs, gcall);
+
+ /* add_stmt_to_tm_region (region, gcall); */
+}
+
+
+/* Expand a call statement as appropriate for a transaction. That is,
+ either verify that the call does not affect the transaction, or
+ redirect the call to a clone that handles transactions, or change
+ the transaction state to IRREVOCABLE. Return true if the call is
+ one of the builtins that end a transaction. */
+
+static bool
+expand_call_tm (struct tm_region *region,
+ gimple_stmt_iterator *gsi)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ tree lhs = gimple_call_lhs (stmt);
+ tree fn_decl;
+ struct cgraph_node *node;
+ bool retval = false;
+
+ fn_decl = gimple_call_fndecl (stmt);
+
+ if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
+ || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
+ transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
+ if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+
+ if (is_tm_pure_call (stmt))
+ return false;
+
+ if (fn_decl)
+ retval = is_tm_ending_fndecl (fn_decl);
+ if (!retval)
+ {
+ /* Assume all non-const/pure calls write to memory, except
+ transaction ending builtins. */
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+ }
+
+ /* For indirect calls, we already generated a call into the runtime. */
+ if (!fn_decl)
+ {
+ tree fn = gimple_call_fn (stmt);
+
+ /* We are guaranteed never to go irrevocable on a safe or pure
+ call, and the pure call was handled above. */
+ if (is_tm_safe (fn))
+ return false;
+ else
+ transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
+
+ return false;
+ }
+
+ node = cgraph_get_node (fn_decl);
+ if (node->local.tm_may_enter_irr)
+ transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
+
+ if (is_tm_abort (fn_decl))
+ {
+ transaction_subcode_ior (region, GTMA_HAVE_ABORT);
+ return true;
+ }
+
+ /* Instrument the store if needed.
+
+ If the assignment happens inside the function call (return slot
+ optimization), there is no instrumentation to be done, since
+ the callee should have done the right thing. */
+ if (lhs && requires_barrier (region->entry_block, lhs, stmt)
+ && !gimple_call_return_slot_opt_p (stmt))
+ {
+ tree tmp = make_rename_temp (TREE_TYPE (lhs), NULL);
+ location_t loc = gimple_location (stmt);
+ edge fallthru_edge = NULL;
+
+ /* Remember if the call was going to throw. */
+ if (stmt_can_throw_internal (stmt))
+ {
+ edge_iterator ei;
+ edge e;
+ basic_block bb = gimple_bb (stmt);
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ fallthru_edge = e;
+ break;
+ }
+ }
+
+ gimple_call_set_lhs (stmt, tmp);
+ update_stmt (stmt);
+ stmt = gimple_build_assign (lhs, tmp);
+ gimple_set_location (stmt, loc);
+
+ /* We cannot throw in the middle of a BB. If the call was going
+ to throw, place the instrumentation on the fallthru edge, so
+ the call remains the last statement in the block. */
+ if (fallthru_edge)
+ {
+ gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
+ gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
+ expand_assign_tm (region, &fallthru_gsi);
+ gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
+ pending_edge_inserts_p = true;
+ }
+ else
+ {
+ gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
+ expand_assign_tm (region, gsi);
+ }
+
+ transaction_subcode_ior (region, GTMA_HAVE_STORE);
+ }
+
+ return retval;
+}
+
+
+/* Expand all statements in BB as appropriate for being inside
+ a transaction. */
+
+static void
+expand_block_tm (struct tm_region *region, basic_block bb)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
+ {
+ gimple stmt = gsi_stmt (gsi);
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_ASSIGN:
+ /* Only memory reads/writes need to be instrumented. */
+ if (gimple_assign_single_p (stmt))
+ {
+ expand_assign_tm (region, &gsi);
+ continue;
+ }
+ break;
+
+ case GIMPLE_CALL:
+ if (expand_call_tm (region, &gsi))
+ return;
+ break;
+
+ case GIMPLE_ASM:
+ gcc_unreachable ();
+
+ default:
+ break;
+ }
+ if (!gsi_end_p (gsi))
+ gsi_next (&gsi);
+ }
+}
+
+/* Return the list of basic-blocks in REGION.
+
+ STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
+ blocks following a TM_IRREVOCABLE call. */
+
+static VEC (basic_block, heap) *
+get_tm_region_blocks (basic_block entry_block,
+ bitmap exit_blocks,
+ bitmap irr_blocks,
+ bitmap all_region_blocks,
+ bool stop_at_irrevocable_p)
+{
+ VEC(basic_block, heap) *bbs = NULL;
+ unsigned i;
+ edge e;
+ edge_iterator ei;
+ bitmap visited_blocks = BITMAP_ALLOC (NULL);
+
+ i = 0;
+ VEC_safe_push (basic_block, heap, bbs, entry_block);
+ bitmap_set_bit (visited_blocks, entry_block->index);
+
+ do
+ {
+ basic_block bb = VEC_index (basic_block, bbs, i++);
+
+ if (exit_blocks &&
+ bitmap_bit_p (exit_blocks, bb->index))
+ continue;
+
+ if (stop_at_irrevocable_p
+ && irr_blocks
+ && bitmap_bit_p (irr_blocks, bb->index))
+ continue;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!bitmap_bit_p (visited_blocks, e->dest->index))
+ {
+ bitmap_set_bit (visited_blocks, e->dest->index);
+ VEC_safe_push (basic_block, heap, bbs, e->dest);
+ }
+ }
+ while (i < VEC_length (basic_block, bbs));
+
+ if (all_region_blocks)
+ bitmap_ior_into (all_region_blocks, visited_blocks);
+
+ BITMAP_FREE (visited_blocks);
+ return bbs;
+}
+
+/* Entry point to the MARK phase of TM expansion. Here we replace
+ transactional memory statements with calls to builtins, and function
+ calls with their transactional clones (if available). But we don't
+ yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges. */
+
+static unsigned int
+execute_tm_mark (void)
+{
+ struct tm_region *region;
+ basic_block bb;
+ VEC (basic_block, heap) *queue;
+ size_t i;
+
+ queue = VEC_alloc (basic_block, heap, 10);
+ pending_edge_inserts_p = false;
+
+ for (region = all_tm_regions; region ; region = region->next)
+ {
+ tm_log_init ();
+ /* If we have a transaction... */
+ if (region->exit_blocks)
+ {
+ unsigned int subcode
+ = gimple_transaction_subcode (region->transaction_stmt);
+
+ /* Collect a new SUBCODE set, now that optimizations are done... */
+ if (subcode & GTMA_DOES_GO_IRREVOCABLE)
+ subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
+ | GTMA_MAY_ENTER_IRREVOCABLE);
+ else
+ subcode &= GTMA_DECLARATION_MASK;
+ gimple_transaction_set_subcode (region->transaction_stmt, subcode);
+ }
+
+ queue = get_tm_region_blocks (region->entry_block,
+ region->exit_blocks,
+ region->irr_blocks,
+ NULL,
+ /*stop_at_irr_p=*/true);
+ for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
+ expand_block_tm (region, bb);
+ VEC_free (basic_block, heap, queue);
+
+ tm_log_emit ();
+ }
+
+ if (pending_edge_inserts_p)
+ gsi_commit_edge_inserts ();
+ return 0;
+}
+
+struct gimple_opt_pass pass_tm_mark =
+{
+ {
+ GIMPLE_PASS,
+ "tmmark", /* name */
+ NULL, /* gate */
+ execute_tm_mark, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_ssa | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa
+ | TODO_verify_ssa
+ | TODO_dump_func, /* todo_flags_finish */
+ }
+};
+
+/* Create an abnormal call edge from BB to the first block of the region
+ represented by STATE. Also record the edge in the TM_RESTART map. */
+
+static inline void
+make_tm_edge (gimple stmt, basic_block bb, struct tm_region *region)
+{
+ void **slot;
+ struct tm_restart_node *n, dummy;
+
+ if (cfun->gimple_df->tm_restart == NULL)
+ cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
+ struct_ptr_eq, ggc_free);
+
+ dummy.stmt = stmt;
+ dummy.label_or_list = gimple_block_label (region->entry_block);
+ slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
+ n = (struct tm_restart_node *) *slot;
+ if (n == NULL)
+ {
+ n = ggc_alloc_tm_restart_node ();
+ *n = dummy;
+ }
+ else
+ {
+ tree old = n->label_or_list;
+ if (TREE_CODE (old) == LABEL_DECL)
+ old = tree_cons (NULL, old, NULL);
+ n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
+ }
+
+ make_edge (bb, region->entry_block, EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
+}
+
+
+/* Split block BB as necessary for every builtin function we added, and
+ wire up the abnormal back edges implied by the transaction restart. */
+
+static void
+expand_block_edges (struct tm_region *region, basic_block bb)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
+ {
+ gimple stmt = gsi_stmt (gsi);
+
+ /* ??? TM_COMMIT (and any other tm builtin function) in a nested
+ transaction has an abnormal edge back to the outer-most transaction
+ (there are no nested retries), while a TM_ABORT also has an abnormal
+ backedge to the inner-most transaction. We haven't actually saved
+ the inner-most transaction here. We should be able to get to it
+ via the region_nr saved on STMT, and read the transaction_stmt from
+ that, and find the first region block from there. */
+ /* ??? Shouldn't we split for any non-pure, non-irrevocable function? */
+ if (gimple_code (stmt) == GIMPLE_CALL
+ && (gimple_call_flags (stmt) & ECF_TM_BUILTIN) != 0)
+ {
+ if (gsi_one_before_end_p (gsi))
+ make_tm_edge (stmt, bb, region);
+ else
+ {
+ edge e = split_block (bb, stmt);
+ make_tm_edge (stmt, bb, region);
+ bb = e->dest;
+ gsi = gsi_start_bb (bb);
+ }
+
+ /* Delete any tail-call annotation that may have been added.
+ The tail-call pass may have mis-identified the commit as being
+ a candidate because we had not yet added this restart edge. */
+ gimple_call_set_tail (stmt, false);
+ }
+
+ gsi_next (&gsi);
+ }
+}
+
+/* Expand the GIMPLE_TRANSACTION statement into the STM library call. */
+
+static void
+expand_transaction (struct tm_region *region)
+{
+ tree status, tm_start;
+ basic_block atomic_bb, slice_bb;
+ gimple_stmt_iterator gsi;
+ tree t1, t2;
+ gimple g;
+ int flags, subcode;
+
+ tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
+ status = make_rename_temp (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
+
+ /* ??? There are plenty of bits here we're not computing. */
+ subcode = gimple_transaction_subcode (region->transaction_stmt);
+ if (subcode & GTMA_DOES_GO_IRREVOCABLE)
+ flags = PR_DOESGOIRREVOCABLE | PR_UNINSTRUMENTEDCODE;
+ else
+ flags = PR_INSTRUMENTEDCODE;
+ if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
+ flags |= PR_HASNOIRREVOCABLE;
+ /* If the transaction does not have an abort in lexical scope and is not
+ marked as an outer transaction, then it will never abort. */
+ if ((subcode & GTMA_HAVE_ABORT) == 0
+ && (subcode & GTMA_IS_OUTER) == 0)
+ flags |= PR_HASNOABORT;
+ if ((subcode & GTMA_HAVE_STORE) == 0)
+ flags |= PR_READONLY;
+ t2 = build_int_cst (TREE_TYPE (status), flags);
+ g = gimple_build_call (tm_start, 1, t2);
+ gimple_call_set_lhs (g, status);
+ gimple_set_location (g, gimple_location (region->transaction_stmt));
+
+ atomic_bb = gimple_bb (region->transaction_stmt);
+
+ if (!VEC_empty (tree, tm_log_save_addresses))
+ tm_log_emit_saves (region->entry_block, atomic_bb);
+
+ gsi = gsi_last_bb (atomic_bb);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ gsi_remove (&gsi, true);
+
+ if (!VEC_empty (tree, tm_log_save_addresses))
+ region->entry_block =
+ tm_log_emit_save_or_restores (region->entry_block,
+ A_RESTORELIVEVARIABLES,
+ status,
+ tm_log_emit_restores,
+ atomic_bb,
+ FALLTHRU_EDGE (atomic_bb),
+ &slice_bb);
+ else
+ slice_bb = atomic_bb;
+
+ /* If we have an ABORT statement, create a test following the start
+ call to perform the abort. */
+ if (gimple_transaction_label (region->transaction_stmt))
+ {
+ edge e;
+ basic_block test_bb;
+
+ test_bb = create_empty_bb (slice_bb);
+ if (VEC_empty (tree, tm_log_save_addresses))
+ region->entry_block = test_bb;
+ gsi = gsi_last_bb (test_bb);
+
+ t1 = make_rename_temp (TREE_TYPE (status), NULL);
+ t2 = build_int_cst (TREE_TYPE (status), A_ABORTTRANSACTION);
+ g = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
+ gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
+
+ t2 = build_int_cst (TREE_TYPE (status), 0);
+ g = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
+ gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
+
+ e = FALLTHRU_EDGE (slice_bb);
+ redirect_edge_pred (e, test_bb);
+ e->flags = EDGE_FALSE_VALUE;
+ e->probability = PROB_ALWAYS - PROB_VERY_UNLIKELY;
+
+ e = BRANCH_EDGE (atomic_bb);
+ redirect_edge_pred (e, test_bb);
+ e->flags = EDGE_TRUE_VALUE;
+ e->probability = PROB_VERY_UNLIKELY;
+
+ e = make_edge (slice_bb, test_bb, EDGE_FALLTHRU);
+ }
+
+ /* If we have no abort, but we do have PHIs at the beginning of the atomic
+ region, that means we have a loop at the beginning of the atomic region
+ that shares the first block. This can cause problems with the abnormal
+ edges we're about to add for the transaction restart. Solve this by
+ adding a new empty block to receive the abnormal edges. */
+ else if (phi_nodes (region->entry_block))
+ {
+ edge e;
+ basic_block empty_bb;
+
+ region->entry_block = empty_bb = create_empty_bb (atomic_bb);
+
+ e = FALLTHRU_EDGE (atomic_bb);
+ redirect_edge_pred (e, empty_bb);
+
+ e = make_edge (atomic_bb, empty_bb, EDGE_FALLTHRU);
+ }
+
+ /* The GIMPLE_TRANSACTION statement no longer exists. */
+ region->transaction_stmt = NULL;
+}
+
+static void expand_regions (struct tm_region *);
+
+/* Helper function for expand_regions. Expand REGION and recurse to
+ the inner region. */
+
+static void
+expand_regions_1 (struct tm_region *region)
+{
+ if (region->exit_blocks)
+ {
+ unsigned int i;
+ basic_block bb;
+ VEC (basic_block, heap) *queue;
+
+ /* Collect the set of blocks in this region. Do this before
+ splitting edges, so that we don't have to play with the
+ dominator tree in the middle. */
+ queue = get_tm_region_blocks (region->entry_block,
+ region->exit_blocks,
+ region->irr_blocks,
+ NULL,
+ /*stop_at_irr_p=*/false);
+ expand_transaction (region);
+ for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
+ expand_block_edges (region, bb);
+ VEC_free (basic_block, heap, queue);
+ }
+ if (region->inner)
+ expand_regions (region->inner);
+}
+
+/* Expand regions starting at REGION. */
+
+static void
+expand_regions (struct tm_region *region)
+{
+ while (region)
+ {
+ expand_regions_1 (region);
+ region = region->next;
+ }
+}
+
+/* Entry point to the final expansion of transactional nodes. */
+
+static unsigned int
+execute_tm_edges (void)
+{
+ expand_regions (all_tm_regions);
+ tm_log_delete ();
+
+ /* We've got to release the dominance info now, to indicate that it
+ must be rebuilt completely. Otherwise we'll crash trying to update
+ the SSA web in the TODO section following this pass. */
+ free_dominance_info (CDI_DOMINATORS);
+ bitmap_obstack_release (&tm_obstack);
+ all_tm_regions = NULL;
+
+ return 0;
+}
+
+struct gimple_opt_pass pass_tm_edges =
+{
+ {
+ GIMPLE_PASS,
+ "tmedge", /* name */
+ NULL, /* gate */
+ execute_tm_edges, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_ssa | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_update_ssa
+ | TODO_verify_ssa
+ | TODO_dump_func, /* todo_flags_finish */
+ }
+};
+
+/* A unique TM memory operation. */
+typedef struct tm_memop
+{
+ /* Unique ID that all memory operations to the same location have. */
+ unsigned int value_id;
+ /* Address of load/store. */
+ tree addr;
+} *tm_memop_t;
+
+/* Sets for solving data flow equations in the memory optimization pass. */
+struct tm_memopt_bitmaps
+{
+ /* Stores available to this BB upon entry. Basically, stores that
+ dominate this BB. */
+ bitmap store_avail_in;
+ /* Stores available at the end of this BB. */
+ bitmap store_avail_out;
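+ /* Store anticipatability sets; see tm_memopt_compute_antic for the
+ dataflow equations that define them. */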
+ bitmap store_antic_in;
+ bitmap store_antic_out;
+ /* Reads available to this BB upon entry. Basically, reads that
+ dominate this BB. */
+ bitmap read_avail_in;
+ /* Reads available at the end of this BB. */
+ bitmap read_avail_out;
+ /* Reads performed in this BB. */
+ bitmap read_local;
+ /* Writes performed in this BB. */
+ bitmap store_local;
+
+ /* Temporary storage for pass. */
+ /* Is the current BB in the worklist? */
+ bool avail_in_worklist_p;
+ /* Have we visited this BB? */
+ bool visited_p;
+};
+
+static bitmap_obstack tm_memopt_obstack;
+
+/* Unique counter for TM loads and stores. Loads and stores of the
+ same address get the same ID. */
+static unsigned int tm_memopt_value_id;
+static htab_t tm_memopt_value_numbers;
+
+#define STORE_AVAIL_IN(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
+#define STORE_AVAIL_OUT(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
+#define STORE_ANTIC_IN(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
+#define STORE_ANTIC_OUT(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
+#define READ_AVAIL_IN(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
+#define READ_AVAIL_OUT(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
+#define READ_LOCAL(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
+#define STORE_LOCAL(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
+#define AVAIL_IN_WORKLIST_P(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
+#define BB_VISITED_P(BB) \
+ ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
+
+/* Htab support. Return a hash value for a `tm_memop'. */
+static hashval_t
+tm_memop_hash (const void *p)
+{
+ const struct tm_memop *mem = (const struct tm_memop *) p;
+ tree addr = mem->addr;
+ /* We drill down to the SSA_NAME/DECL for the hash, but equality is
+ actually done with operand_equal_p (see tm_memop_eq). */
+ if (TREE_CODE (addr) == ADDR_EXPR)
+ addr = TREE_OPERAND (addr, 0);
+ return iterative_hash_expr (addr, 0);
+}
+
+/* Htab support. Return true if two tm_memop's are the same. */
+static int
+tm_memop_eq (const void *p1, const void *p2)
+{
+ const struct tm_memop *mem1 = (const struct tm_memop *) p1;
+ const struct tm_memop *mem2 = (const struct tm_memop *) p2;
+
+ return operand_equal_p (mem1->addr, mem2->addr, 0);
+}
+
+/* Given a TM load/store in STMT, return the value number for the address
+ it accesses. */
+
+static unsigned int
+tm_memopt_value_number (gimple stmt, enum insert_option op)
+{
+ struct tm_memop tmpmem, *mem;
+ void **slot;
+
+ gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
+ tmpmem.addr = gimple_call_arg (stmt, 0);
+ slot = htab_find_slot (tm_memopt_value_numbers, &tmpmem, op);
+ if (*slot)
+ mem = (struct tm_memop *) *slot;
+ else if (op == INSERT)
+ {
+ mem = XNEW (struct tm_memop);
+ *slot = mem;
+ mem->value_id = tm_memopt_value_id++;
+ mem->addr = tmpmem.addr;
+ }
+ else
+ gcc_unreachable ();
+ return mem->value_id;
+}
+
+/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL. */
+
+static void
+tm_memopt_accumulate_memops (basic_block bb)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ bitmap bits;
+ unsigned int loc;
+
+ if (is_tm_store (stmt))
+ bits = STORE_LOCAL (bb);
+ else if (is_tm_load (stmt))
+ bits = READ_LOCAL (bb);
+ else
+ continue;
+
+ loc = tm_memopt_value_number (stmt, INSERT);
+ bitmap_set_bit (bits, loc);
+ if (dump_file)
+ {
+ fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
+ is_tm_load (stmt) ? "LOAD" : "STORE", loc,
+ gimple_bb (stmt)->index);
+ print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
+ fprintf (dump_file, "\n");
+ }
+ }
+}
+
+/* Prettily dump one of the memopt sets. BITS is the bitmap to dump. */
+
+static void
+dump_tm_memopt_set (const char *set_name, bitmap bits)
+{
+ unsigned i;
+ bitmap_iterator bi;
+ const char *comma = "";
+
+ fprintf (dump_file, "TM memopt: %s: [", set_name);
+ EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
+ {
+ htab_iterator hi;
+ struct tm_memop *mem;
+
+ /* Yeah, yeah, yeah. Whatever. This is just for debugging. */
+ FOR_EACH_HTAB_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
+ if (mem->value_id == i)
+ break;
+ gcc_assert (mem->value_id == i);
+ fprintf (dump_file, "%s", comma);
+ comma = ", ";
+ print_generic_expr (dump_file, mem->addr, 0);
+ }
+ fprintf (dump_file, "]\n");
+}
+
+/* Prettily dump all of the memopt sets in BLOCKS. */
+
+static void
+dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
+{
+ size_t i;
+ basic_block bb;
+
+ for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ {
+ fprintf (dump_file, "------------BB %d---------\n", bb->index);
+ dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
+ dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
+ dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
+ dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
+ dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
+ dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
+ }
+}
+
+/* Compute {STORE,READ}_AVAIL_IN for the basic block BB. */
+
+static void
+tm_memopt_compute_avin (basic_block bb)
+{
+ edge e;
+ unsigned ix;
+
+ /* Seed with the AVOUT of any predecessor. */
+ for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
+ {
+ e = EDGE_PRED (bb, ix);
+ /* Make sure we have already visited this BB, and that it is thus
+ initialized.
+
+ If e->src->aux is NULL, this predecessor is actually on an
+ enclosing transaction. We only care about the current
+ transaction, so ignore it. */
+ if (e->src->aux && BB_VISITED_P (e->src))
+ {
+ bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
+ bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
+ break;
+ }
+ }
+
+ for (; ix < EDGE_COUNT (bb->preds); ix++)
+ {
+ e = EDGE_PRED (bb, ix);
+ if (e->src->aux && BB_VISITED_P (e->src))
+ {
+ bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
+ bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
+ }
+ }
+
+ BB_VISITED_P (bb) = true;
+}
+
+/* Compute the STORE_ANTIC_IN for the basic block BB. */
+
+static void
+tm_memopt_compute_antin (basic_block bb)
+{
+ edge e;
+ unsigned ix;
+
+ /* Seed with the ANTIC_OUT of any successor. */
+ for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
+ {
+ e = EDGE_SUCC (bb, ix);
+ /* Make sure we have already visited this BB, and that it is thus
+ initialized. */
+ if (BB_VISITED_P (e->dest))
+ {
+ bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
+ break;
+ }
+ }
+
+ for (; ix < EDGE_COUNT (bb->succs); ix++)
+ {
+ e = EDGE_SUCC (bb, ix);
+ if (BB_VISITED_P (e->dest))
+ bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
+ }
+
+ BB_VISITED_P (bb) = true;
+}
+
+/* Compute the AVAIL sets for every basic block in BLOCKS.
+
+ We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
+
+ AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
+ AVAIL_IN[bb] = intersect (AVAIL_OUT[predecessors])
+
+ This is basically what we do in lcm's compute_available(), but here
+ we calculate two sets of sets (one for STOREs and one for READs),
+ and we work on a region instead of the entire CFG.
+
+ REGION is the TM region.
+ BLOCKS are the basic blocks in the region. */
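+/* For example, if every predecessor of a block BB has a TM store to *P
+ available on exit, that store's value number ends up in
+ STORE_AVAIL_IN (BB) (the intersection over the predecessors), and
+ tm_memopt_transform_blocks can then turn a TM load of *P in BB into
+ its read-after-write variant. */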
+
+static void
+tm_memopt_compute_available (struct tm_region *region,
+ VEC (basic_block, heap) *blocks)
+{
+ edge e;
+ basic_block *worklist, *qin, *qout, *qend, bb;
+ unsigned int qlen, i;
+ edge_iterator ei;
+ bool changed;
+
+ /* Allocate a worklist array/queue. Entries are only added to the
+ list if they were not already on the list. So the size is
+ bounded by the number of basic blocks in the region. */
+ qlen = VEC_length (basic_block, blocks) - 1;
+ qin = qout = worklist =
+ XNEWVEC (basic_block, qlen);
+
+ /* Put every block in the region on the worklist. */
+ for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ {
+ /* Seed AVAIL_OUT with the LOCAL set. */
+ bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
+ bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
+
+ AVAIL_IN_WORKLIST_P (bb) = true;
+ /* No need to insert the entry block, since it has an AVIN of
+ null, and an AVOUT that has already been seeded in. */
+ if (bb != region->entry_block)
+ *qin++ = bb;
+ }
+
+ /* The entry block has been initialized with the local sets. */
+ BB_VISITED_P (region->entry_block) = true;
+
+ qin = worklist;
+ qend = &worklist[qlen];
+
+ /* Iterate until the worklist is empty. */
+ while (qlen)
+ {
+ /* Take the first entry off the worklist. */
+ bb = *qout++;
+ qlen--;
+
+ if (qout >= qend)
+ qout = worklist;
+
+ /* This block can be added to the worklist again if necessary. */
+ AVAIL_IN_WORKLIST_P (bb) = false;
+ tm_memopt_compute_avin (bb);
+
+ /* Note: We do not add the LOCAL sets here because we already
+ seeded the AVAIL_OUT sets with them. */
+ changed = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
+ changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
+ if (changed
+ && (region->exit_blocks == NULL
+ || !bitmap_bit_p (region->exit_blocks, bb->index)))
+ /* If the out state of this block changed, then we need to add
+ its successors to the worklist if they are not already in. */
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
+ {
+ *qin++ = e->dest;
+ AVAIL_IN_WORKLIST_P (e->dest) = true;
+ qlen++;
+
+ if (qin >= qend)
+ qin = worklist;
+ }
+ }
+
+ free (worklist);
+
+ if (dump_file)
+ dump_tm_memopt_sets (blocks);
+}
+
+/* Compute ANTIC sets for every basic block in BLOCKS.
+
+ We compute STORE_ANTIC_OUT as follows:
+
+ STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
+ STORE_ANTIC_IN[bb] = intersect(STORE_ANTIC_OUT[successors])
+
+ REGION is the TM region.
+ BLOCKS are the basic blocks in the region. */
+
+static void
+tm_memopt_compute_antic (struct tm_region *region,
+ VEC (basic_block, heap) *blocks)
+{
+ edge e;
+ basic_block *worklist, *qin, *qout, *qend, bb;
+ unsigned int qlen;
+ int i;
+ edge_iterator ei;
+
+ /* Allocate a worklist array/queue. Entries are only added to the
+ list if they were not already on the list. So the size is
+ bounded by the number of basic blocks in the region. */
+ qin = qout = worklist =
+ XNEWVEC (basic_block, VEC_length (basic_block, blocks));
+
+ for (qlen = 0, i = VEC_length (basic_block, blocks) - 1; i >= 0; --i)
+ {
+ bb = VEC_index (basic_block, blocks, i);
+
+ /* Seed ANTIC_OUT with the LOCAL set. */
+ bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
+
+ /* Put every block in the region on the worklist. */
+ AVAIL_IN_WORKLIST_P (bb) = true;
+ /* No need to insert exit blocks, since their ANTIC_IN is NULL,
+ and their ANTIC_OUT has already been seeded in. */
+ if (region->exit_blocks
+ && !bitmap_bit_p (region->exit_blocks, bb->index))
+ {
+ qlen++;
+ *qin++ = bb;
+ }
+ }
+
+ /* The exit blocks have been initialized with the local sets. */
+ if (region->exit_blocks)
+ {
+ unsigned int i;
+ bitmap_iterator bi;
+ EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
+ BB_VISITED_P (BASIC_BLOCK (i)) = true;
+ }
+
+ qin = worklist;
+ qend = &worklist[qlen];
+
+ /* Iterate until the worklist is empty. */
+ while (qlen)
+ {
+ /* Take the first entry off the worklist. */
+ bb = *qout++;
+ qlen--;
+
+ if (qout >= qend)
+ qout = worklist;
+
+ /* This block can be added to the worklist again if necessary. */
+ AVAIL_IN_WORKLIST_P (bb) = false;
+ tm_memopt_compute_antin (bb);
+
+ /* Note: We do not add the LOCAL sets here because we already
+ seeded the ANTIC_OUT sets with them. */
+ if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
+ && bb != region->entry_block)
+ /* If the out state of this block changed, then we need to add
+ its predecessors to the worklist if they are not already in. */
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ if (!AVAIL_IN_WORKLIST_P (e->src))
+ {
+ *qin++ = e->src;
+ AVAIL_IN_WORKLIST_P (e->src) = true;
+ qlen++;
+
+ if (qin >= qend)
+ qin = worklist;
+ }
+ }
+
+ free (worklist);
+
+ if (dump_file)
+ dump_tm_memopt_sets (blocks);
+}
+
+/* Offsets of load variants from TM_LOAD. For example,
+ BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
+ See gtm-builtins.def. */
+#define TRANSFORM_RAR 1
+#define TRANSFORM_RAW 2
+#define TRANSFORM_RFW 3
+/* Offsets of store variants from TM_STORE. */
+#define TRANSFORM_WAR 1
+#define TRANSFORM_WAW 2
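+
+/* For example, tm_memopt_transform_stmt below turns a plain
+ BUILT_IN_TM_LOAD_* call into its BUILT_IN_TM_LOAD_RAR* counterpart by
+ adding TRANSFORM_RAR to the builtin's function code, relying on the
+ ordering of the entries in gtm-builtins.def. */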
+
+/* Inform about a load/store optimization. */
+
+static void
+dump_tm_memopt_transform (gimple stmt)
+{
+ if (dump_file)
+ {
+ fprintf (dump_file, "TM memopt: transforming: ");
+ print_gimple_stmt (dump_file, stmt, 0, 0);
+ fprintf (dump_file, "\n");
+ }
+}
+
+/* Perform a read/write optimization. Replaces the TM builtin in STMT
+ by a builtin that is OFFSET entries down in the builtins table in
+ gtm-builtins.def. */
+
+static void
+tm_memopt_transform_stmt (unsigned int offset,
+ gimple stmt,
+ gimple_stmt_iterator *gsi)
+{
+ tree fn = gimple_call_fn (stmt);
+ gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
+ TREE_OPERAND (fn, 0)
+ = builtin_decl_explicit ((enum built_in_function)
+ (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
+ + offset));
+ gimple_call_set_fn (stmt, fn);
+ gsi_replace (gsi, stmt, true);
+ dump_tm_memopt_transform (stmt);
+}
+
+/* Perform the actual TM memory optimization transformations in the
+ basic blocks in BLOCKS. */
+
+static void
+tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
+{
+ size_t i;
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+
+ for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ {
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ bitmap read_avail = READ_AVAIL_IN (bb);
+ bitmap store_avail = STORE_AVAIL_IN (bb);
+ bitmap store_antic = STORE_ANTIC_OUT (bb);
+ unsigned int loc;
+
+ if (is_tm_simple_load (stmt))
+ {
+ loc = tm_memopt_value_number (stmt, NO_INSERT);
+ if (store_avail && bitmap_bit_p (store_avail, loc))
+ tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
+ else if (store_antic && bitmap_bit_p (store_antic, loc))
+ {
+ tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
+ bitmap_set_bit (store_avail, loc);
+ }
+ else if (read_avail && bitmap_bit_p (read_avail, loc))
+ tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
+ else
+ bitmap_set_bit (read_avail, loc);
+ }
+ else if (is_tm_simple_store (stmt))
+ {
+ loc = tm_memopt_value_number (stmt, NO_INSERT);
+ if (store_avail && bitmap_bit_p (store_avail, loc))
+ tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
+ else
+ {
+ if (read_avail && bitmap_bit_p (read_avail, loc))
+ tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
+ bitmap_set_bit (store_avail, loc);
+ }
+ }
+ }
+ }
+}
+
+/* Return a new set of bitmaps for a BB. */
+
+static struct tm_memopt_bitmaps *
+tm_memopt_init_sets (void)
+{
+ struct tm_memopt_bitmaps *b
+ = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
+ b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
+ b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
+ return b;
+}
+
+/* Free sets computed for each BB. */
+
+static void
+tm_memopt_free_sets (VEC (basic_block, heap) *blocks)
+{
+ size_t i;
+ basic_block bb;
+
+ for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ bb->aux = NULL;
+}
+
+/* Clear the visited bit for every basic block in BLOCKS. */
+
+static void
+tm_memopt_clear_visited (VEC (basic_block, heap) *blocks)
+{
+ size_t i;
+ basic_block bb;
+
+ for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
+ BB_VISITED_P (bb) = false;
+}
+
+/* Replace TM load/stores with hints for the runtime. We handle
+ things like read-after-write, write-after-read, read-after-read,
+ read-for-write, etc. */
+
+static unsigned int
+execute_tm_memopt (void)
+{
+ struct tm_region *region;
+ VEC (basic_block, heap) *bbs;
+
+ tm_memopt_value_id = 0;
+ tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
+
+ for (region = all_tm_regions; region; region = region->next)
+ {
+ /* All the TM stores/loads in the current region. */
+ size_t i;
+ basic_block bb;
+
+ bitmap_obstack_initialize (&tm_memopt_obstack);
+
+ /* Save all BBs for the current region. */
+ bbs = get_tm_region_blocks (region->entry_block,
+ region->exit_blocks,
+ region->irr_blocks,
+ NULL,
+ false);
+
+ /* Collect all the memory operations. */
+ for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
+ {
+ bb->aux = tm_memopt_init_sets ();
+ tm_memopt_accumulate_memops (bb);
+ }
+
+ /* Solve data flow equations and transform each block accordingly. */
+ tm_memopt_clear_visited (bbs);
+ tm_memopt_compute_available (region, bbs);
+ tm_memopt_clear_visited (bbs);
+ tm_memopt_compute_antic (region, bbs);
+ tm_memopt_transform_blocks (bbs);
+
+ tm_memopt_free_sets (bbs);
+ VEC_free (basic_block, heap, bbs);
+ bitmap_obstack_release (&tm_memopt_obstack);
+ htab_empty (tm_memopt_value_numbers);
+ }
+
+ htab_delete (tm_memopt_value_numbers);
+ return 0;
+}
+
+static bool
+gate_tm_memopt (void)
+{
+ return flag_tm && optimize > 0;
+}
+
+struct gimple_opt_pass pass_tm_memopt =
+{
+ {
+ GIMPLE_PASS,
+ "tmmemopt", /* name */
+ gate_tm_memopt, /* gate */
+ execute_tm_memopt, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_ssa | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ }
+};
+
+
+/* Interprocedural analysis for the creation of transactional clones.
+ The aim of this pass is to find which functions are referenced in
+ a non-irrevocable transaction context, and for those over which
+ we have control (or user directive), create a version of the
+ function which uses only the transactional interface to reference
+ protected memories. This analysis proceeds in several steps:
+
+ (1) Collect the set of all possible transactional clones:
+
+ (a) For all local public functions marked tm_callable, push
+ them onto the tm_callee queue.
+
+ (b) For all local functions, scan for calls in transaction blocks.
+ Push the caller and callee onto the tm_caller and tm_callee
+ queues. Count the number of callers for each callee.
+
+ (c) For each local function on the callee list, assume we will
+ create a transactional clone. Push *all* calls onto the
+ callee queues; count the number of clone callers separately
+ from the number of original callers.
+
+ (2) Propagate irrevocable status up the dominator tree:
+
+ (a) Any external function on the callee list that is not marked
+ tm_callable is irrevocable. Push all callers of such onto
+ a worklist.
+
+ (b) For each function on the worklist, mark each block that
+ contains an irrevocable call. Use the AND operator to
+ propagate that mark up the dominator tree.
+
+ (c) If we reach the entry block for a possible transactional
+ clone, then the transactional clone is irrevocable, and
+ we should not create the clone after all. Push all
+ callers onto the worklist.
+
+ (d) Place tm_irrevocable calls at the beginning of the relevant
+ blocks. Special case here is the entry block for the entire
+ transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
+ the library to begin the region in serial mode. Decrement
+ the call count for all callees in the irrevocable region.
+
+ (3) Create the transactional clones:
+
+ Any tm_callee that still has a non-zero call count is cloned.
+*/
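
   A minimal source-level example of the situation described above (the
   attribute and keyword spellings follow this implementation; function
   names are purely illustrative):

       /* Step (1a): an explicitly marked callable is queued directly.  */
       __attribute__((transaction_callable)) void counter_add (int);

       int counter;

       __attribute__((transaction_callable)) void
       counter_add (int v)
       {
         counter += v;      /* the clone will use TM load/store barriers */
       }

       /* Step (1b): the call inside the transaction queues counter_add as
          a callee; step (3) then emits its transactional clone.  */
       void
       bump (void)
       {
         __transaction_atomic { counter_add (1); }
       }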
+
+/* This structure is stored in the AUX field of each cgraph_node. */
+struct tm_ipa_cg_data
+{
+ /* The clone of the function that got created. */
+ struct cgraph_node *clone;
+
+ /* The tm regions in the normal function. */
+ struct tm_region *all_tm_regions;
+
+ /* The blocks of the normal/clone functions that contain irrevocable
+ calls, or blocks that are post-dominated by irrevocable calls. */
+ bitmap irrevocable_blocks_normal;
+ bitmap irrevocable_blocks_clone;
+
+ /* The blocks of the normal function that are involved in transactions. */
+ bitmap transaction_blocks_normal;
+
+ /* The number of callers to the transactional clone of this function
+ from normal and transactional clones respectively. */
+ unsigned tm_callers_normal;
+ unsigned tm_callers_clone;
+
+ /* True if all calls to this function's transactional clone
+ are irrevocable. Also automatically true if the function
+ has no transactional clone. */
+ bool is_irrevocable;
+
+ /* Flags indicating the presence of this function in various queues. */
+ bool in_callee_queue;
+ bool in_worklist;
+
+ /* Flags indicating the kind of scan desired while in the worklist. */
+ bool want_irr_scan_normal;
+};
+
+typedef struct cgraph_node *cgraph_node_p;
+
+DEF_VEC_P (cgraph_node_p);
+DEF_VEC_ALLOC_P (cgraph_node_p, heap);
+
+typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
+
+/* Return the ipa data associated with NODE, allocating zeroed memory
+ if necessary. */
+
+static struct tm_ipa_cg_data *
+get_cg_data (struct cgraph_node *node)
+{
+ struct tm_ipa_cg_data *d = (struct tm_ipa_cg_data *) node->aux;
+
+ if (d == NULL)
+ {
+ d = (struct tm_ipa_cg_data *)
+ obstack_alloc (&tm_obstack.obstack, sizeof (*d));
+ node->aux = (void *) d;
+ memset (d, 0, sizeof (*d));
+ }
+
+ return d;
+}
+
+/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
+ it is already present. */
+
+static void
+maybe_push_queue (struct cgraph_node *node,
+ cgraph_node_queue *queue_p, bool *in_queue_p)
+{
+ if (!*in_queue_p)
+ {
+ *in_queue_p = true;
+ VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
+ }
+}
+
+/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
+ Queue all callees within block BB. */
+
+static void
+ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
+ basic_block bb, bool for_clone)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (fndecl)
+ {
+ struct tm_ipa_cg_data *d;
+ unsigned *pcallers;
+ struct cgraph_node *node;
+
+ if (is_tm_ending_fndecl (fndecl))
+ continue;
+ if (find_tm_replacement_function (fndecl))
+ continue;
+
+ node = cgraph_get_node (fndecl);
+ gcc_assert (node != NULL);
+ d = get_cg_data (node);
+
+ pcallers = (for_clone ? &d->tm_callers_clone
+ : &d->tm_callers_normal);
+ *pcallers += 1;
+
+ maybe_push_queue (node, callees_p, &d->in_callee_queue);
+ }
+ }
+ }
+}
+
+/* Scan all calls within the transaction regions of the current function
+ (recorded in D), and push the resulting callee nodes onto CALLEES_P. */
+
+static void
+ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
+ cgraph_node_queue *callees_p)
+{
+ struct tm_region *r;
+
+ d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
+ d->all_tm_regions = all_tm_regions;
+
+ for (r = all_tm_regions; r; r = r->next)
+ {
+ VEC (basic_block, heap) *bbs;
+ basic_block bb;
+ unsigned i;
+
+ bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
+ d->transaction_blocks_normal, false);
+
+ FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
+ ipa_tm_scan_calls_block (callees_p, bb, false);
+
+ VEC_free (basic_block, heap, bbs);
+ }
+}
+
+/* Scan all calls in NODE as if this is the transactional clone,
+ and push the destinations into the callee queue. */
+
+static void
+ipa_tm_scan_calls_clone (struct cgraph_node *node,
+ cgraph_node_queue *callees_p)
+{
+ struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
+ basic_block bb;
+
+ FOR_EACH_BB_FN (bb, fn)
+ ipa_tm_scan_calls_block (callees_p, bb, true);
+}
+
+/* The function NODE has been detected to be irrevocable. Push all
+ of its callers onto WORKLIST for the purpose of re-scanning them. */
+
+static void
+ipa_tm_note_irrevocable (struct cgraph_node *node,
+ cgraph_node_queue *worklist_p)
+{
+ struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct cgraph_edge *e;
+
+ d->is_irrevocable = true;
+
+ for (e = node->callers; e ; e = e->next_caller)
+ {
+ basic_block bb;
+
+ /* Don't examine recursive calls. */
+ if (e->caller == node)
+ continue;
+ /* Even if we think we can go irrevocable, believe the user
+ above all. */
+ if (is_tm_safe_or_pure (e->caller->decl))
+ continue;
+
+ d = get_cg_data (e->caller);
+
+ /* Check if the callee is in a transactional region. If so,
+ schedule the function for normal re-scan as well. */
+ bb = gimple_bb (e->call_stmt);
+ gcc_assert (bb != NULL);
+ if (d->transaction_blocks_normal
+ && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
+ d->want_irr_scan_normal = true;
+
+ maybe_push_queue (e->caller, worklist_p, &d->in_worklist);
+ }
+}
+
+/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
+ within the block is irrevocable. */
+
+static bool
+ipa_tm_scan_irr_block (basic_block bb)
+{
+ gimple_stmt_iterator gsi;
+ tree fn;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ switch (gimple_code (stmt))
+ {
+ case GIMPLE_CALL:
+ if (is_tm_pure_call (stmt))
+ break;
+
+ fn = gimple_call_fn (stmt);
+
+ /* Functions with the attribute are by definition irrevocable. */
+ if (is_tm_irrevocable (fn))
+ return true;
+
+ /* For direct function calls, go ahead and check for replacement
+ functions, or transitive irrevocable functions. For indirect
+ functions, we'll ask the runtime. */
+ if (TREE_CODE (fn) == ADDR_EXPR)
+ {
+ struct tm_ipa_cg_data *d;
+
+ fn = TREE_OPERAND (fn, 0);
+ if (is_tm_ending_fndecl (fn))
+ break;
+ if (find_tm_replacement_function (fn))
+ break;
+
+ d = get_cg_data (cgraph_get_node (fn));
+ if (d->is_irrevocable)
+ return true;
+ }
+ break;
+
+ case GIMPLE_ASM:
+ /* ??? The Approved Method of indicating that an inline
+ assembly statement is not relevant to the transaction
+ is to wrap it in a __tm_waiver block. This is not
+ yet implemented, so we can't check for it. */
+ return true;
+
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
+/* For each of the blocks seeded within PQUEUE, walk the CFG looking
+ for new irrevocable blocks, marking them in NEW_IRR. Don't bother
+ scanning past OLD_IRR or EXIT_BLOCKS. */
+
+static bool
+ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
+ bitmap old_irr, bitmap exit_blocks)
+{
+ bool any_new_irr = false;
+ edge e;
+ edge_iterator ei;
+ bitmap visited_blocks = BITMAP_ALLOC (NULL);
+
+ do
+ {
+ basic_block bb = VEC_pop (basic_block, *pqueue);
+
+ /* Don't re-scan blocks we know already are irrevocable. */
+ if (old_irr && bitmap_bit_p (old_irr, bb->index))
+ continue;
+
+ if (ipa_tm_scan_irr_block (bb))
+ {
+ bitmap_set_bit (new_irr, bb->index);
+ any_new_irr = true;
+ }
+ else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
+ {
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!bitmap_bit_p (visited_blocks, e->dest->index))
+ {
+ bitmap_set_bit (visited_blocks, e->dest->index);
+ VEC_safe_push (basic_block, heap, *pqueue, e->dest);
+ }
+ }
+ }
+ while (!VEC_empty (basic_block, *pqueue));
+
+ BITMAP_FREE (visited_blocks);
+
+ return any_new_irr;
+}
+
+/* Propagate the irrevocable property both up and down the dominator tree.
+ ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
+ are the exit blocks of the TM region; OLD_IRR is the result of a previous
+ scan of the dominator tree which has been fully propagated; NEW_IRR is the
+ set of new blocks which are gaining the irrevocable property during the
+ current scan. */
+
+static void
+ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
+ bitmap old_irr, bitmap exit_blocks)
+{
+ VEC (basic_block, heap) *bbs;
+ bitmap all_region_blocks;
+
+ /* If this block is in the old set, no need to rescan. */
+ if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
+ return;
+
+ all_region_blocks = BITMAP_ALLOC (&tm_obstack);
+ bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
+ all_region_blocks, false);
+ do
+ {
+ basic_block bb = VEC_pop (basic_block, bbs);
+ bool this_irr = bitmap_bit_p (new_irr, bb->index);
+ bool all_son_irr = false;
+ edge_iterator ei;
+ edge e;
+
+ /* Propagate up: if all of this block's successors are irrevocable,
+ then so is this block (there must be at least one successor). */
+ if (!this_irr)
+ {
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (!bitmap_bit_p (new_irr, e->dest->index))
+ {
+ all_son_irr = false;
+ break;
+ }
+ else
+ all_son_irr = true;
+ }
+ if (all_son_irr)
+ {
+ /* Add block to new_irr if it hasn't already been processed. */
+ if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
+ {
+ bitmap_set_bit (new_irr, bb->index);
+ this_irr = true;
+ }
+ }
+ }
+
+ /* Propagate down to everyone we immediately dominate. */
+ if (this_irr)
+ {
+ basic_block son;
+ for (son = first_dom_son (CDI_DOMINATORS, bb);
+ son;
+ son = next_dom_son (CDI_DOMINATORS, son))
+ {
+ /* Make sure block is actually in a TM region, and it
+ isn't already in old_irr. */
+ if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
+ && bitmap_bit_p (all_region_blocks, son->index))
+ bitmap_set_bit (new_irr, son->index);
+ }
+ }
+ }
+ while (!VEC_empty (basic_block, bbs));
+
+ BITMAP_FREE (all_region_blocks);
+ VEC_free (basic_block, heap, bbs);
+}
+
+static void
+ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (fndecl)
+ {
+ struct tm_ipa_cg_data *d;
+ unsigned *pcallers;
+
+ if (is_tm_ending_fndecl (fndecl))
+ continue;
+ if (find_tm_replacement_function (fndecl))
+ continue;
+
+ d = get_cg_data (cgraph_get_node (fndecl));
+ pcallers = (for_clone ? &d->tm_callers_clone
+ : &d->tm_callers_normal);
+
+ gcc_assert (*pcallers > 0);
+ *pcallers -= 1;
+ }
+ }
+ }
+}
+
+/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
+ as well as other irrevocable actions such as inline assembly. Mark all
+ such blocks as irrevocable and decrement the number of calls to
+ transactional clones. Return true if, for the transactional clone, the
+ entire function is irrevocable. */
+
+static bool
+ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
+{
+ struct tm_ipa_cg_data *d;
+ bitmap new_irr, old_irr;
+ VEC (basic_block, heap) *queue;
+ bool ret = false;
+
+ current_function_decl = node->decl;
+ push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ d = get_cg_data (node);
+ queue = VEC_alloc (basic_block, heap, 10);
+ new_irr = BITMAP_ALLOC (&tm_obstack);
+
+ /* Scan each tm region, propagating irrevocable status through the tree. */
+ if (for_clone)
+ {
+ old_irr = d->irrevocable_blocks_clone;
+ VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
+ if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
+ {
+ ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
+ old_irr, NULL);
+ ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
+ }
+ }
+ else
+ {
+ struct tm_region *region;
+
+ old_irr = d->irrevocable_blocks_normal;
+ for (region = d->all_tm_regions; region; region = region->next)
+ {
+ VEC_quick_push (basic_block, queue, region->entry_block);
+ if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
+ region->exit_blocks))
+ ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
+ region->exit_blocks);
+ }
+ }
+
+ /* If we found any new irrevocable blocks, reduce the call count for
+ transactional clones within the irrevocable blocks. Save the new
+ set of irrevocable blocks for next time. */
+ if (!bitmap_empty_p (new_irr))
+ {
+ bitmap_iterator bmi;
+ unsigned i;
+
+ EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
+ ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
+
+ if (old_irr)
+ {
+ bitmap_ior_into (old_irr, new_irr);
+ BITMAP_FREE (new_irr);
+ }
+ else if (for_clone)
+ d->irrevocable_blocks_clone = new_irr;
+ else
+ d->irrevocable_blocks_normal = new_irr;
+
+ if (dump_file && new_irr)
+ {
+ const char *dname;
+ bitmap_iterator bmi;
+ unsigned i;
+
+ dname = lang_hooks.decl_printable_name (current_function_decl, 2);
+ EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
+ fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
+ }
+ }
+ else
+ BITMAP_FREE (new_irr);
+
+ VEC_free (basic_block, heap, queue);
+ pop_cfun ();
+ current_function_decl = NULL;
+
+ return ret;
+}
+
+/* Return true if, for the transactional clone of NODE, any call
+ may enter irrevocable mode. */
+
+static bool
+ipa_tm_mayenterirr_function (struct cgraph_node *node)
+{
+ struct tm_ipa_cg_data *d = get_cg_data (node);
+ tree decl = node->decl;
+ unsigned flags = flags_from_decl_or_type (decl);
+
+ /* Handle some TM builtins. Ordinarily these aren't actually generated
+ at this point, but handling these functions when written in by the
+ user makes it easier to build unit tests. */
+ if (flags & ECF_TM_BUILTIN)
+ return false;
+
+ /* Filter out all functions that are marked. */
+ if (flags & ECF_TM_PURE)
+ return false;
+ if (is_tm_safe (decl))
+ return false;
+ if (is_tm_irrevocable (decl))
+ return true;
+ if (is_tm_callable (decl))
+ return true;
+ if (find_tm_replacement_function (decl))
+ return true;
+
+ /* If we aren't seeing the final version of the function we don't
+ know what it will contain at runtime. */
+ if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
+ return true;
+
+ /* If the function must go irrevocable, then of course true. */
+ if (d->is_irrevocable)
+ return true;
+
+ /* If there are any blocks marked irrevocable, then the function
+ as a whole may enter irrevocable. */
+ if (d->irrevocable_blocks_clone)
+ return true;
+
+ /* We may have previously marked this function as tm_may_enter_irr;
+ see pass_diagnose_tm_blocks. */
+ if (node->local.tm_may_enter_irr)
+ return true;
+
+ /* Recurse on the main body for aliases. In general, this will
+ result in one of the bits above being set so that we will not
+ have to recurse next time. */
+ if (node->alias)
+ return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
+
+ /* What remains is unmarked local functions without items that force
+ the function to go irrevocable. */
+ return false;
+}
+
+/* Diagnose calls from transaction_safe functions to unmarked
+ functions that are determined not to be safe. */
+
+static void
+ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
+{
+ struct cgraph_edge *e;
+
+ for (e = node->callees; e ; e = e->next_callee)
+ if (!is_tm_callable (e->callee->decl)
+ && e->callee->local.tm_may_enter_irr)
+ error_at (gimple_location (e->call_stmt),
+ "unsafe function call %qD within "
+ "%<transaction_safe%> function", e->callee->decl);
+}
+
+/* Diagnose calls from atomic transactions to unmarked functions
+ that are determined not to be safe. */
+
+static void
+ipa_tm_diagnose_transaction (struct cgraph_node *node,
+ struct tm_region *all_tm_regions)
+{
+ struct tm_region *r;
+
+ for (r = all_tm_regions; r ; r = r->next)
+ if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
+ {
+ /* Atomic transactions can be nested inside relaxed. */
+ if (r->inner)
+ ipa_tm_diagnose_transaction (node, r->inner);
+ }
+ else
+ {
+ VEC (basic_block, heap) *bbs;
+ gimple_stmt_iterator gsi;
+ basic_block bb;
+ size_t i;
+
+ bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
+ r->irr_blocks, NULL, false);
+
+ for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+ tree fndecl;
+
+ if (gimple_code (stmt) == GIMPLE_ASM)
+ {
+ error_at (gimple_location (stmt),
+ "asm not allowed in atomic transaction");
+ continue;
+ }
+
+ if (!is_gimple_call (stmt))
+ continue;
+ fndecl = gimple_call_fndecl (stmt);
+
+ /* Indirect function calls have been diagnosed already. */
+ if (!fndecl)
+ continue;
+
+ /* Stop at the end of the transaction. */
+ if (is_tm_ending_fndecl (fndecl))
+ {
+ if (bitmap_bit_p (r->exit_blocks, bb->index))
+ break;
+ continue;
+ }
+
+ /* Marked functions have been diagnosed already. */
+ if (is_tm_pure_call (stmt))
+ continue;
+ if (is_tm_callable (fndecl))
+ continue;
+
+ if (cgraph_local_info (fndecl)->tm_may_enter_irr)
+ error_at (gimple_location (stmt),
+ "unsafe function call %qD within "
+ "atomic transaction", fndecl);
+ }
+
+ VEC_free (basic_block, heap, bbs);
+ }
+}
+
+/* Return the transactionally mangled counterpart of OLD_ASM_ID, an
+ identifier such as a DECL_ASSEMBLER_NAME. The result is an interned
+ IDENTIFIER_NODE; the caller does not need to free anything. */
+
+static tree
+tm_mangle (tree old_asm_id)
+{
+ const char *old_asm_name;
+ char *tm_name;
+ void *alloc = NULL;
+ struct demangle_component *dc;
+ tree new_asm_id;
+
+ /* Determine if the symbol is already a valid C++ mangled name. Do this
+ even for C, which might be interfacing with C++ code via appropriately
+ ugly identifiers. */
+ /* ??? We could probably do just as well checking for "_Z" and be done. */
+ old_asm_name = IDENTIFIER_POINTER (old_asm_id);
+ dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
+
+ if (dc == NULL)
+ {
+ char length[8];
+
+ do_unencoded:
+ sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
+ tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
+ }
+ else
+ {
+ old_asm_name += 2; /* Skip _Z */
+
+ switch (dc->type)
+ {
+ case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
+ case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
+ /* Don't play silly games, you! */
+ goto do_unencoded;
+
+ case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
+ /* I'd really like to know if we can ever be passed one of
+ these from the C++ front end. The Logical Thing would
+ seem that hidden-alias should be outer-most, so that we
+ get hidden-alias of a transaction-clone and not vice-versa. */
+ old_asm_name += 2;
+ break;
+
+ default:
+ break;
+ }
+
+ tm_name = concat ("_ZGTt", old_asm_name, NULL);
+ }
+ free (alloc);
+
+ new_asm_id = get_identifier (tm_name);
+ free (tm_name);
+
+ return new_asm_id;
+}
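
   For illustration, the mapping implemented above behaves roughly as
   follows (identifiers invented; the second case assumes the symbol
   already carries a C++ _Z prefix):

       "counter_add"  (plain C identifier, length 11)  ->  "_ZGTt11counter_add"
       "_Z3addii"     (C++ mangling of add(int, int))  ->  "_ZGTt3addii"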
+
+static inline void
+ipa_tm_mark_needed_node (struct cgraph_node *node)
+{
+ cgraph_mark_needed_node (node);
+ /* ??? function_and_variable_visibility will reset
+ the needed bit, without actually checking. */
+ node->analyzed = 1;
+}
+
+/* Callback data for ipa_tm_create_version_alias. */
+struct create_version_alias_info
+{
+ struct cgraph_node *old_node;
+ tree new_decl;
+};
+
+/* A subroutine of ipa_tm_create_version, called via
+ cgraph_for_node_and_aliases. Create new tm clones for each of
+ the existing aliases. */
+static bool
+ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
+{
+ struct create_version_alias_info *info
+ = (struct create_version_alias_info *)data;
+ tree old_decl, new_decl, tm_name;
+ struct cgraph_node *new_node;
+
+ if (!node->same_body_alias)
+ return false;
+
+ old_decl = node->decl;
+ tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
+ new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
+ TREE_CODE (old_decl), tm_name,
+ TREE_TYPE (old_decl));
+
+ SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
+ SET_DECL_RTL (new_decl, NULL);
+
+ /* Based loosely on C++'s make_alias_for(). */
+ TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
+ DECL_CONTEXT (new_decl) = NULL;
+ TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
+ DECL_EXTERNAL (new_decl) = 0;
+ DECL_ARTIFICIAL (new_decl) = 1;
+ TREE_ADDRESSABLE (new_decl) = 1;
+ TREE_USED (new_decl) = 1;
+ TREE_SYMBOL_REFERENCED (tm_name) = 1;
+
+ /* Perform the same remapping to the comdat group. */
+ if (DECL_COMDAT (new_decl))
+ DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
+
+ new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
+ new_node->tm_clone = true;
+ get_cg_data (node)->clone = new_node;
+
+ record_tm_clone_pair (old_decl, new_decl);
+
+ if (info->old_node->needed)
+ ipa_tm_mark_needed_node (new_node);
+ return false;
+}
+
+/* Create a copy of the function (possibly declaration only) of OLD_NODE,
+ appropriate for the transactional clone. */
+
+static void
+ipa_tm_create_version (struct cgraph_node *old_node)
+{
+ tree new_decl, old_decl, tm_name;
+ struct cgraph_node *new_node;
+
+ old_decl = old_node->decl;
+ new_decl = copy_node (old_decl);
+
+ /* DECL_ASSEMBLER_NAME needs to be set before we call
+ cgraph_copy_node_for_versioning below, because cgraph_node will
+ fill the assembler_name_hash. */
+ tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
+ SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
+ SET_DECL_RTL (new_decl, NULL);
+ TREE_SYMBOL_REFERENCED (tm_name) = 1;
+
+ /* Perform the same remapping to the comdat group. */
+ if (DECL_COMDAT (new_decl))
+ DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
+
+ new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
+ new_node->lowered = true;
+ new_node->tm_clone = 1;
+ get_cg_data (old_node)->clone = new_node;
+
+ if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
+ {
+ /* Remap extern inline to static inline. */
+ /* ??? Is it worth trying to use make_decl_one_only? */
+ if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
+ {
+ DECL_EXTERNAL (new_decl) = 0;
+ TREE_PUBLIC (new_decl) = 0;
+ }
+
+ tree_function_versioning (old_decl, new_decl, NULL, false, NULL,
+ NULL, NULL);
+ }
+
+ record_tm_clone_pair (old_decl, new_decl);
+
+ cgraph_call_function_insertion_hooks (new_node);
+ if (old_node->needed)
+ ipa_tm_mark_needed_node (new_node);
+
+ /* Do the same thing, but for any aliases of the original node. */
+ {
+ struct create_version_alias_info data;
+ data.old_node = old_node;
+ data.new_decl = new_decl;
+ cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
+ &data, true);
+ }
+}
+
+/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB. */
+
+static void
+ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
+ basic_block bb)
+{
+ gimple_stmt_iterator gsi;
+ gimple g;
+
+ transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
+
+ g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
+ 1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
+
+ split_block_after_labels (bb);
+ gsi = gsi_after_labels (bb);
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+
+ cgraph_create_edge (node,
+ cgraph_get_create_node
+ (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
+ g, 0,
+ compute_call_stmt_bb_frequency (node->decl,
+ gimple_bb (g)));
+}
+
+/* Construct a call to TM_GETTMCLONE and insert it before GSI. */
+
+static bool
+ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
+ struct tm_region *region,
+ gimple_stmt_iterator *gsi, gimple stmt)
+{
+ tree gettm_fn, ret, old_fn, callfn;
+ gimple g, g2;
+ bool safe;
+
+ old_fn = gimple_call_fn (stmt);
+
+ if (TREE_CODE (old_fn) == ADDR_EXPR)
+ {
+ tree fndecl = TREE_OPERAND (old_fn, 0);
+ tree clone = get_tm_clone_pair (fndecl);
+
+ /* By transforming the call into a TM_GETTMCLONE, we are
+ technically taking the address of the original function and
+ its clone. Explain this so inlining will know this function
+ is needed. */
+ cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
+ if (clone)
+ cgraph_mark_address_taken_node (cgraph_get_node (clone));
+ }
+
+ safe = is_tm_safe (TREE_TYPE (old_fn));
+ gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
+ : BUILT_IN_TM_GETTMCLONE_IRR);
+ ret = create_tmp_var (ptr_type_node, NULL);
+ add_referenced_var (ret);
+
+ if (!safe)
+ transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
+
+ /* Discard OBJ_TYPE_REF, since we weren't able to fold it. */
+ if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
+ old_fn = OBJ_TYPE_REF_EXPR (old_fn);
+
+ g = gimple_build_call (gettm_fn, 1, old_fn);
+ ret = make_ssa_name (ret, g);
+ gimple_call_set_lhs (g, ret);
+
+ gsi_insert_before (gsi, g, GSI_SAME_STMT);
+
+ cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
+ compute_call_stmt_bb_frequency (node->decl,
+ gimple_bb(g)));
+
+ /* Cast return value from tm_gettmclone* into appropriate function
+ pointer. */
+ callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
+ add_referenced_var (callfn);
+ g2 = gimple_build_assign (callfn,
+ fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
+ callfn = make_ssa_name (callfn, g2);
+ gimple_assign_set_lhs (g2, callfn);
+ gsi_insert_before (gsi, g2, GSI_SAME_STMT);
+
+ /* ??? This is a hack to preserve the NOTHROW bit on the call,
+ which we would have derived from the decl. Failure to save
+ this bit means we might have to split the basic block. */
+ if (gimple_call_nothrow_p (stmt))
+ gimple_call_set_nothrow (stmt, true);
+
+ gimple_call_set_fn (stmt, callfn);
+
+ /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
+ for a call statement. Fix it. */
+ {
+ tree lhs = gimple_call_lhs (stmt);
+ tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
+ if (lhs
+ && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
+ {
+ tree temp;
+
+ temp = make_rename_temp (rettype, 0);
+ gimple_call_set_lhs (stmt, temp);
+
+ g2 = gimple_build_assign (lhs,
+ fold_build1 (VIEW_CONVERT_EXPR,
+ TREE_TYPE (lhs), temp));
+ gsi_insert_after (gsi, g2, GSI_SAME_STMT);
+ }
+ }
+
+ update_stmt (stmt);
+
+ return true;
+}
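
   Schematically, for an indirect call inside a transaction the code above
   rewrites (sketch only; _ITM_getTMCloneOrIrrevocable is the libitm entry
   point behind BUILT_IN_TM_GETTMCLONE_IRR, and the temporaries are made up):

       fnptr (arg);

   into

       ret_1 = __builtin__ITM_getTMCloneOrIrrevocable (fnptr);
       callfn_2 = (void (*) (int)) ret_1;
       callfn_2 (arg);

   so the runtime either hands back the registered transactional clone or
   switches the transaction to irrevocable mode and returns the original
   address.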
+
+/* Helper function for ipa_tm_transform_calls*. Given a call
+ statement in GSI which resides inside transaction REGION, redirect
+ the call to either its wrapper function, or its clone. */
+
+static void
+ipa_tm_transform_calls_redirect (struct cgraph_node *node,
+ struct tm_region *region,
+ gimple_stmt_iterator *gsi,
+ bool *need_ssa_rename_p)
+{
+ gimple stmt = gsi_stmt (*gsi);
+ struct cgraph_node *new_node;
+ struct cgraph_edge *e = cgraph_edge (node, stmt);
+ tree fndecl = gimple_call_fndecl (stmt);
+
+ /* For indirect calls, pass the address through the runtime. */
+ if (fndecl == NULL)
+ {
+ *need_ssa_rename_p |=
+ ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
+ return;
+ }
+
+ /* Handle some TM builtins. Ordinarily these aren't actually generated
+ at this point, but handling these functions when written in by the
+ user makes it easier to build unit tests. */
+ if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
+ return;
+
+ /* Fixup recursive calls inside clones. */
+ /* ??? Why did cgraph_copy_node_for_versioning update the call edges
+ for recursion but not update the call statements themselves? */
+ if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
+ {
+ gimple_call_set_fndecl (stmt, current_function_decl);
+ return;
+ }
+
+ /* If there is a replacement, use it. */
+ fndecl = find_tm_replacement_function (fndecl);
+ if (fndecl)
+ {
+ new_node = cgraph_get_create_node (fndecl);
+
+ /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
+
+ We can't do this earlier in record_tm_replacement because
+ cgraph_remove_unreachable_nodes is called before we inject
+ references to the node. Further, we can't do this in some
+ nice central place in ipa_tm_execute because we don't have
+ the exact list of wrapper functions that would be used.
+ Marking more wrappers than necessary results in the creation
+ of unnecessary cgraph_nodes, which can cause some of the
+ other IPA passes to crash.
+
+ We do need to mark these nodes so that we get the proper
+ result in expand_call_tm. */
+ /* ??? This seems broken. How is it that we're marking the
+ CALLEE as may_enter_irr? Surely we should be marking the
+ CALLER. Also note that find_tm_replacement_function also
+ contains mappings into the TM runtime, e.g. memcpy. These
+ we know won't go irrevocable. */
+ new_node->local.tm_may_enter_irr = 1;
+ }
+ else
+ {
+ struct tm_ipa_cg_data *d = get_cg_data (e->callee);
+ new_node = d->clone;
+
+ /* As we've already skipped pure calls and appropriate builtins,
+ and we've already marked irrevocable blocks, if we can't come
+ up with a static replacement, then ask the runtime. */
+ if (new_node == NULL)
+ {
+ *need_ssa_rename_p |=
+ ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
+ cgraph_remove_edge (e);
+ return;
+ }
+
+ fndecl = new_node->decl;
+ }
+
+ cgraph_redirect_edge_callee (e, new_node);
+ gimple_call_set_fndecl (stmt, fndecl);
+}
+
+/* Helper function for ipa_tm_transform_calls. For a given BB,
+ install calls to tm_irrevocable when IRR_BLOCKS are reached,
+ and redirect other calls to the generated transactional clone. */
+
+static bool
+ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
+ basic_block bb, bitmap irr_blocks)
+{
+ gimple_stmt_iterator gsi;
+ bool need_ssa_rename = false;
+
+ if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
+ {
+ ipa_tm_insert_irr_call (node, region, bb);
+ return true;
+ }
+
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ gimple stmt = gsi_stmt (gsi);
+
+ if (!is_gimple_call (stmt))
+ continue;
+ if (is_tm_pure_call (stmt))
+ continue;
+
+ /* Redirect edges to the appropriate replacement or clone. */
+ ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
+ }
+
+ return need_ssa_rename;
+}
+
+/* Walk the CFG for REGION, beginning at BB. Install calls to
+ tm_irrevocable when IRR_BLOCKS are reached, and redirect other calls to
+ the generated transactional clone. */
+
+static bool
+ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
+ basic_block bb, bitmap irr_blocks)
+{
+ bool need_ssa_rename = false;
+ edge e;
+ edge_iterator ei;
+ VEC(basic_block, heap) *queue = NULL;
+ bitmap visited_blocks = BITMAP_ALLOC (NULL);
+
+ VEC_safe_push (basic_block, heap, queue, bb);
+ do
+ {
+ bb = VEC_pop (basic_block, queue);
+
+ need_ssa_rename |=
+ ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
+
+ if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
+ continue;
+
+ if (region && bitmap_bit_p (region->exit_blocks, bb->index))
+ continue;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (!bitmap_bit_p (visited_blocks, e->dest->index))
+ {
+ bitmap_set_bit (visited_blocks, e->dest->index);
+ VEC_safe_push (basic_block, heap, queue, e->dest);
+ }
+ }
+ while (!VEC_empty (basic_block, queue));
+
+ VEC_free (basic_block, heap, queue);
+ BITMAP_FREE (visited_blocks);
+
+ return need_ssa_rename;
+}
+
+/* Transform the calls within the TM regions within NODE. */
+
+static void
+ipa_tm_transform_transaction (struct cgraph_node *node)
+{
+ struct tm_ipa_cg_data *d = get_cg_data (node);
+ struct tm_region *region;
+ bool need_ssa_rename = false;
+
+ current_function_decl = node->decl;
+ push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ for (region = d->all_tm_regions; region; region = region->next)
+ {
+ /* If we're sure to go irrevocable, don't transform anything. */
+ if (d->irrevocable_blocks_normal
+ && bitmap_bit_p (d->irrevocable_blocks_normal,
+ region->entry_block->index))
+ {
+ transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE);
+ transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
+ continue;
+ }
+
+ need_ssa_rename |=
+ ipa_tm_transform_calls (node, region, region->entry_block,
+ d->irrevocable_blocks_normal);
+ }
+
+ if (need_ssa_rename)
+ update_ssa (TODO_update_ssa_only_virtuals);
+
+ pop_cfun ();
+ current_function_decl = NULL;
+}
+
+/* Transform the calls within the transactional clone of NODE. */
+
+static void
+ipa_tm_transform_clone (struct cgraph_node *node)
+{
+ struct tm_ipa_cg_data *d = get_cg_data (node);
+ bool need_ssa_rename;
+
+ /* If this function makes no calls and has no irrevocable blocks,
+ then there's nothing to do. */
+ /* ??? Remove non-aborting top-level transactions. */
+ if (!node->callees && !d->irrevocable_blocks_clone)
+ return;
+
+ current_function_decl = d->clone->decl;
+ push_cfun (DECL_STRUCT_FUNCTION (current_function_decl));
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ need_ssa_rename =
+ ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
+ d->irrevocable_blocks_clone);
+
+ if (need_ssa_rename)
+ update_ssa (TODO_update_ssa_only_virtuals);
+
+ pop_cfun ();
+ current_function_decl = NULL;
+}
+
+/* Main entry point for the transactional memory IPA pass. */
+
+static unsigned int
+ipa_tm_execute (void)
+{
+ cgraph_node_queue tm_callees = NULL;
+ /* List of functions that will go irrevocable. */
+ cgraph_node_queue irr_worklist = NULL;
+
+ struct cgraph_node *node;
+ struct tm_ipa_cg_data *d;
+ enum availability a;
+ unsigned int i;
+
+#ifdef ENABLE_CHECKING
+ verify_cgraph ();
+#endif
+
+ bitmap_obstack_initialize (&tm_obstack);
+
+ /* For all local functions marked tm_callable, queue them. */
+ for (node = cgraph_nodes; node; node = node->next)
+ if (is_tm_callable (node->decl)
+ && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
+ {
+ d = get_cg_data (node);
+ maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
+ }
+
+ /* For all local reachable functions... */
+ for (node = cgraph_nodes; node; node = node->next)
+ if (node->reachable && node->lowered
+ && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
+ {
+ /* ... marked tm_pure, record that fact for the runtime by
+ indicating that the pure function is its own tm_callable.
+ No need to do this if the function's address can't be taken. */
+ if (is_tm_pure (node->decl))
+ {
+ if (!node->local.local)
+ record_tm_clone_pair (node->decl, node->decl);
+ continue;
+ }
+
+ current_function_decl = node->decl;
+ push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ calculate_dominance_info (CDI_DOMINATORS);
+
+ tm_region_init (NULL);
+ if (all_tm_regions)
+ {
+ d = get_cg_data (node);
+
+ /* Scan for calls that are in each transaction. */
+ ipa_tm_scan_calls_transaction (d, &tm_callees);
+
+ /* If we saw something that will make us go irrevocable, put it
+ in the worklist so we can scan the function later
+ (ipa_tm_scan_irr_function) and mark the irrevocable blocks. */
+ if (node->local.tm_may_enter_irr)
+ {
+ maybe_push_queue (node, &irr_worklist, &d->in_worklist);
+ d->want_irr_scan_normal = true;
+ }
+ }
+
+ pop_cfun ();
+ current_function_decl = NULL;
+ }
+
+ /* For every local function on the callee list, scan as if we will be
+ creating a transactional clone, queueing all new functions we find
+ along the way. */
+ for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ {
+ node = VEC_index (cgraph_node_p, tm_callees, i);
+ a = cgraph_function_body_availability (node);
+ d = get_cg_data (node);
+
+ /* If we saw something that will make us go irrevocable, put it
+ in the worklist so we can scan the function later
+ (ipa_tm_scan_irr_function) and mark the irrevocable blocks. */
+ if (node->local.tm_may_enter_irr)
+ maybe_push_queue (node, &irr_worklist, &d->in_worklist);
+
+ /* Some callees cannot be arbitrarily cloned. These will always be
+ irrevocable. Mark these now, so that we need not scan them. */
+ if (is_tm_irrevocable (node->decl))
+ ipa_tm_note_irrevocable (node, &irr_worklist);
+ else if (a <= AVAIL_NOT_AVAILABLE
+ && !is_tm_safe_or_pure (node->decl))
+ ipa_tm_note_irrevocable (node, &irr_worklist);
+ else if (a >= AVAIL_OVERWRITABLE)
+ {
+ if (!tree_versionable_function_p (node->decl))
+ ipa_tm_note_irrevocable (node, &irr_worklist);
+ else if (!d->is_irrevocable)
+ {
+ /* If this is an alias, make sure its base is queued as well.
+ We need not scan the callees now, as the base will do. */
+ if (node->alias)
+ {
+ node = cgraph_get_node (node->thunk.alias);
+ d = get_cg_data (node);
+ maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
+ continue;
+ }
+
+ /* Add all nodes called by this function into
+ tm_callees as well. */
+ ipa_tm_scan_calls_clone (node, &tm_callees);
+ }
+ }
+ }
+
+ /* Iterate scans until no more work to be done. Prefer not to use
+ VEC_pop because the worklist tends to follow a breadth-first
+ search of the callgraph, which should allow convergence with a
+ minimum number of scans. But we also don't want the worklist
+ array to grow without bound, so we shift the array up periodically. */
+ for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
+ {
+ if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
+ {
+ VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
+ i = 0;
+ }
+
+ node = VEC_index (cgraph_node_p, irr_worklist, i);
+ d = get_cg_data (node);
+ d->in_worklist = false;
+
+ if (d->want_irr_scan_normal)
+ {
+ d->want_irr_scan_normal = false;
+ ipa_tm_scan_irr_function (node, false);
+ }
+ if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
+ ipa_tm_note_irrevocable (node, &irr_worklist);
+ }
+
+ /* For every function on the callee list, collect the tm_may_enter_irr
+ bit on the node. */
+ VEC_truncate (cgraph_node_p, irr_worklist, 0);
+ for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ {
+ node = VEC_index (cgraph_node_p, tm_callees, i);
+ if (ipa_tm_mayenterirr_function (node))
+ {
+ d = get_cg_data (node);
+ gcc_assert (d->in_worklist == false);
+ maybe_push_queue (node, &irr_worklist, &d->in_worklist);
+ }
+ }
+
+ /* Propagate the tm_may_enter_irr bit to callers until stable. */
+ for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
+ {
+ struct cgraph_node *caller;
+ struct cgraph_edge *e;
+ struct ipa_ref *ref;
+ unsigned j;
+
+ if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
+ {
+ VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
+ i = 0;
+ }
+
+ node = VEC_index (cgraph_node_p, irr_worklist, i);
+ d = get_cg_data (node);
+ d->in_worklist = false;
+ node->local.tm_may_enter_irr = true;
+
+ /* Propagate back to normal callers. */
+ for (e = node->callers; e ; e = e->next_caller)
+ {
+ caller = e->caller;
+ if (!is_tm_safe_or_pure (caller->decl)
+ && !caller->local.tm_may_enter_irr)
+ {
+ d = get_cg_data (caller);
+ maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
+ }
+ }
+
+ /* Propagate back to referring aliases as well. */
+ for (j = 0; ipa_ref_list_refering_iterate (&node->ref_list, j, ref); j++)
+ {
+ caller = ref->refering.cgraph_node;
+ if (ref->use == IPA_REF_ALIAS
+ && !caller->local.tm_may_enter_irr)
+ {
+ d = get_cg_data (caller);
+ maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
+ }
+ }
+ }
+
+ /* Now validate all tm_safe functions, and all atomic regions in
+ other functions. */
+ for (node = cgraph_nodes; node; node = node->next)
+ if (node->reachable && node->lowered
+ && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
+ {
+ d = get_cg_data (node);
+ if (is_tm_safe (node->decl))
+ ipa_tm_diagnose_tm_safe (node);
+ else if (d->all_tm_regions)
+ ipa_tm_diagnose_transaction (node, d->all_tm_regions);
+ }
+
+ /* Create clones. Do those that are not irrevocable and have a
+ positive call count. Do those publicly visible functions that
+ the user directed us to clone. */
+ for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ {
+ bool doit = false;
+
+ node = VEC_index (cgraph_node_p, tm_callees, i);
+ if (node->same_body_alias)
+ continue;
+
+ a = cgraph_function_body_availability (node);
+ d = get_cg_data (node);
+
+ if (a <= AVAIL_NOT_AVAILABLE)
+ doit = is_tm_callable (node->decl);
+ else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
+ doit = true;
+ else if (!d->is_irrevocable
+ && d->tm_callers_normal + d->tm_callers_clone > 0)
+ doit = true;
+
+ if (doit)
+ ipa_tm_create_version (node);
+ }
+
+ /* Redirect calls to the new clones, and insert irrevocable marks. */
+ for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
+ {
+ node = VEC_index (cgraph_node_p, tm_callees, i);
+ if (node->analyzed)
+ {
+ d = get_cg_data (node);
+ if (d->clone)
+ ipa_tm_transform_clone (node);
+ }
+ }
+ for (node = cgraph_nodes; node; node = node->next)
+ if (node->reachable && node->lowered
+ && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
+ {
+ d = get_cg_data (node);
+ if (d->all_tm_regions)
+ ipa_tm_transform_transaction (node);
+ }
+
+ /* Free and clear all data structures. */
+ VEC_free (cgraph_node_p, heap, tm_callees);
+ VEC_free (cgraph_node_p, heap, irr_worklist);
+ bitmap_obstack_release (&tm_obstack);
+
+ for (node = cgraph_nodes; node; node = node->next)
+ node->aux = NULL;
+
+#ifdef ENABLE_CHECKING
+ verify_cgraph ();
+#endif
+
+ return 0;
+}
+
+struct simple_ipa_opt_pass pass_ipa_tm =
+{
+ {
+ SIMPLE_IPA_PASS,
+ "tmipa", /* name */
+ gate_tm, /* gate */
+ ipa_tm_execute, /* execute */
+ NULL, /* sub */
+ NULL, /* next */
+ 0, /* static_pass_number */
+ TV_TRANS_MEM, /* tv_id */
+ PROP_ssa | PROP_cfg, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ TODO_dump_func, /* todo_flags_finish */
+ },
+};
+
+#include "gt-trans-mem.h"
diff --git a/gcc/trans-mem.h b/gcc/trans-mem.h
new file mode 100644
index 00000000000..95e9e7e5f00
--- /dev/null
+++ b/gcc/trans-mem.h
@@ -0,0 +1,35 @@
+/* Miscellaneous transactional memory support definitions.
+ Copyright (C) 2009, 2011 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+/* These defines must match the enumerations in libitm.h. */
+#define PR_INSTRUMENTEDCODE 0x0001
+#define PR_UNINSTRUMENTEDCODE 0x0002
+#define PR_HASNOXMMUPDATE 0x0004
+#define PR_HASNOABORT 0x0008
+#define PR_HASNOIRREVOCABLE 0x0020
+#define PR_DOESGOIRREVOCABLE 0x0040
+#define PR_HASNOSIMPLEREADS 0x0080
+#define PR_AWBARRIERSOMITTED 0x0100
+#define PR_RARBARRIERSOMITTED 0x0200
+#define PR_UNDOLOGCODE 0x0400
+#define PR_PREFERUNINSTRUMENTED 0x0800
+#define PR_EXCEPTIONBLOCK 0x1000
+#define PR_HASELSE 0x2000
+#define PR_READONLY 0x4000
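
   A sketch of how these bits are meant to be combined: the GIMPLE expansion
   of a transaction ORs the applicable properties into the argument of the
   generated BUILT_IN_TM_START call, e.g. (combination illustrative only):

       unsigned int prop = PR_INSTRUMENTEDCODE
                           | PR_UNINSTRUMENTEDCODE
                           | PR_HASNOABORT;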
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 7ec50dbabd4..d81cc670bf4 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -117,6 +117,7 @@ static int gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
static void gimple_cfg2vcg (FILE *);
static gimple first_non_label_stmt (basic_block);
+static bool verify_gimple_transaction (gimple);
/* Flowgraph optimization and cleanup. */
static void gimple_merge_blocks (basic_block, basic_block);
@@ -666,6 +667,15 @@ make_edges (void)
}
break;
+ case GIMPLE_TRANSACTION:
+ {
+ tree abort_label = gimple_transaction_label (last);
+ if (abort_label)
+ make_edge (bb, label_to_block (abort_label), 0);
+ fallthru = true;
+ }
+ break;
+
default:
gcc_assert (!stmt_ends_bb_p (last));
fallthru = true;
@@ -1196,22 +1206,30 @@ cleanup_dead_labels (void)
FOR_EACH_BB (bb)
{
gimple stmt = last_stmt (bb);
+ tree label, new_label;
+
if (!stmt)
continue;
switch (gimple_code (stmt))
{
case GIMPLE_COND:
- {
- tree true_label = gimple_cond_true_label (stmt);
- tree false_label = gimple_cond_false_label (stmt);
+ label = gimple_cond_true_label (stmt);
+ if (label)
+ {
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_cond_set_true_label (stmt, new_label);
+ }
- if (true_label)
- gimple_cond_set_true_label (stmt, main_block_label (true_label));
- if (false_label)
- gimple_cond_set_false_label (stmt, main_block_label (false_label));
- break;
- }
+ label = gimple_cond_false_label (stmt);
+ if (label)
+ {
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_cond_set_false_label (stmt, new_label);
+ }
+ break;
case GIMPLE_SWITCH:
{
@@ -1221,8 +1239,10 @@ cleanup_dead_labels (void)
for (i = 0; i < n; ++i)
{
tree case_label = gimple_switch_label (stmt, i);
- tree label = main_block_label (CASE_LABEL (case_label));
- CASE_LABEL (case_label) = label;
+ label = CASE_LABEL (case_label);
+ new_label = main_block_label (label);
+ if (new_label != label)
+ CASE_LABEL (case_label) = new_label;
}
break;
}
@@ -1243,13 +1263,27 @@ cleanup_dead_labels (void)
/* We have to handle gotos until they're removed, and we don't
remove them until after we've created the CFG edges. */
case GIMPLE_GOTO:
- if (!computed_goto_p (stmt))
+ if (!computed_goto_p (stmt))
{
- tree new_dest = main_block_label (gimple_goto_dest (stmt));
- gimple_goto_set_dest (stmt, new_dest);
+ label = gimple_goto_dest (stmt);
+ new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_goto_set_dest (stmt, new_label);
}
break;
+ case GIMPLE_TRANSACTION:
+ {
+ tree label = gimple_transaction_label (stmt);
+ if (label)
+ {
+ tree new_label = main_block_label (label);
+ if (new_label != label)
+ gimple_transaction_set_label (stmt, new_label);
+ }
+ }
+ break;
+
default:
break;
}
@@ -2272,6 +2306,13 @@ is_ctrl_altering_stmt (gimple t)
if (flags & ECF_NORETURN)
return true;
+ /* TM ending statements have backedges out of the transaction.
+ Return true so we split the basic block containing them.
+ Note that the TM_BUILTIN test is merely an optimization. */
+ if ((flags & ECF_TM_BUILTIN)
+ && is_tm_ending_fndecl (gimple_call_fndecl (t)))
+ return true;
+
/* BUILT_IN_RETURN call is same as return statement. */
if (gimple_call_builtin_p (t, BUILT_IN_RETURN))
return true;
@@ -2293,6 +2334,10 @@ is_ctrl_altering_stmt (gimple t)
/* OpenMP directives alter control flow. */
return true;
+ case GIMPLE_TRANSACTION:
+ /* A transaction start alters control flow. */
+ return true;
+
default:
break;
}
@@ -4063,7 +4108,6 @@ verify_gimple_switch (gimple stmt)
return false;
}
-
/* Verify a gimple debug statement STMT.
Returns true if anything is wrong. */
@@ -4164,6 +4208,9 @@ verify_gimple_stmt (gimple stmt)
case GIMPLE_ASM:
return false;
+ case GIMPLE_TRANSACTION:
+ return verify_gimple_transaction (stmt);
+
/* Tuples that do not have tree operands. */
case GIMPLE_NOP:
case GIMPLE_PREDICT:
@@ -4280,10 +4327,19 @@ verify_gimple_in_seq_2 (gimple_seq stmts)
err |= verify_gimple_in_seq_2 (gimple_eh_filter_failure (stmt));
break;
+ case GIMPLE_EH_ELSE:
+ err |= verify_gimple_in_seq_2 (gimple_eh_else_n_body (stmt));
+ err |= verify_gimple_in_seq_2 (gimple_eh_else_e_body (stmt));
+ break;
+
case GIMPLE_CATCH:
err |= verify_gimple_in_seq_2 (gimple_catch_handler (stmt));
break;
+ case GIMPLE_TRANSACTION:
+ err |= verify_gimple_transaction (stmt);
+ break;
+
default:
{
bool err2 = verify_gimple_stmt (stmt);
@@ -4297,6 +4353,18 @@ verify_gimple_in_seq_2 (gimple_seq stmts)
return err;
}
+/* Verify the contents of a GIMPLE_TRANSACTION. Returns true if there
+ is a problem, otherwise false. */
+
+static bool
+verify_gimple_transaction (gimple stmt)
+{
+ tree lab = gimple_transaction_label (stmt);
+ if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
+ return true;
+ return verify_gimple_in_seq_2 (gimple_transaction_body (stmt));
+}
+
/* Verify the GIMPLE statements inside the statement list STMTS. */
@@ -5061,6 +5129,13 @@ gimple_redirect_edge_and_branch (edge e, basic_block dest)
redirect_eh_dispatch_edge (stmt, e, dest);
break;
+ case GIMPLE_TRANSACTION:
+ /* The ABORT edge has a stored label associated with it, otherwise
+ the edges are simply redirectable. */
+ if (e->flags == 0)
+ gimple_transaction_set_label (stmt, gimple_block_label (dest));
+ break;
+
default:
/* Otherwise it must be a fallthru edge, and we don't need to
do anything besides redirecting it. */
@@ -6443,8 +6518,10 @@ dump_function_to_file (tree fn, FILE *file, int flags)
bool ignore_topmost_bind = false, any_var = false;
basic_block bb;
tree chain;
+ bool tmclone = TREE_CODE (fn) == FUNCTION_DECL && decl_is_tm_clone (fn);
- fprintf (file, "%s (", lang_hooks.decl_printable_name (fn, 2));
+ fprintf (file, "%s %s(", lang_hooks.decl_printable_name (fn, 2),
+ tmclone ? "[tm-clone] " : "");
arg = DECL_ARGUMENTS (fn);
while (arg)
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index fbc444ca716..440ac0f4082 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -54,26 +54,6 @@ using_eh_for_cleanups (void)
/* Misc functions used in this file. */
-/* Compare and hash for any structure which begins with a canonical
- pointer. Assumes all pointers are interchangeable, which is sort
- of already assumed by gcc elsewhere IIRC. */
-
-static int
-struct_ptr_eq (const void *a, const void *b)
-{
- const void * const * x = (const void * const *) a;
- const void * const * y = (const void * const *) b;
- return *x == *y;
-}
-
-static hashval_t
-struct_ptr_hash (const void *a)
-{
- const void * const * x = (const void * const *) a;
- return (size_t)*x >> 4;
-}
-
-
/* Remember and lookup EH landing pad data for arbitrary statements.
Really this means any statement that could_throw_p. We could
stuff this information into the stmt_ann data structure, but:
@@ -284,6 +264,11 @@ collect_finally_tree (gimple stmt, gimple region)
collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
break;
+ case GIMPLE_EH_ELSE:
+ collect_finally_tree_1 (gimple_eh_else_n_body (stmt), region);
+ collect_finally_tree_1 (gimple_eh_else_e_body (stmt), region);
+ break;
+
default:
/* A type, a decl, or some kind of statement that we're not
interested in. Don't walk them. */
@@ -534,6 +519,10 @@ replace_goto_queue_1 (gimple stmt, struct leh_tf_state *tf,
case GIMPLE_EH_FILTER:
replace_goto_queue_stmt_list (gimple_eh_filter_failure (stmt), tf);
break;
+ case GIMPLE_EH_ELSE:
+ replace_goto_queue_stmt_list (gimple_eh_else_n_body (stmt), tf);
+ replace_goto_queue_stmt_list (gimple_eh_else_e_body (stmt), tf);
+ break;
default:
/* These won't have gotos in them. */
@@ -921,6 +910,21 @@ lower_try_finally_fallthru_label (struct leh_tf_state *tf)
return label;
}
+/* A subroutine of lower_try_finally. If FINALLY consists of a
+ GIMPLE_EH_ELSE node, return it. */
+
+static inline gimple
+get_eh_else (gimple_seq finally)
+{
+ gimple x = gimple_seq_first_stmt (finally);
+ if (gimple_code (x) == GIMPLE_EH_ELSE)
+ {
+ gcc_assert (gimple_seq_singleton_p (finally));
+ return x;
+ }
+ return NULL;
+}
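
   The shape being matched is, schematically (GIMPLE pseudo-dump; the TM
   lowering, for instance, produces this form so that commit code can differ
   between the normal and the exceptional exit):

       try
         {
           <body>
         }
       finally
         {
           <GIMPLE_EH_ELSE>
             normal exit      -> gimple_eh_else_n_body
             exceptional exit -> gimple_eh_else_e_body
         }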
+
/* A subroutine of lower_try_finally. If the eh_protect_cleanup_actions
langhook returns non-null, then the language requires that the exception
path out of a try_finally be treated specially. To wit: the code within
@@ -950,7 +954,7 @@ honor_protect_cleanup_actions (struct leh_state *outer_state,
gimple_stmt_iterator gsi;
bool finally_may_fallthru;
gimple_seq finally;
- gimple x;
+ gimple x, eh_else;
/* First check for nothing to do. */
if (lang_hooks.eh_protect_cleanup_actions == NULL)
@@ -960,12 +964,18 @@ honor_protect_cleanup_actions (struct leh_state *outer_state,
return;
finally = gimple_try_cleanup (tf->top_p);
- finally_may_fallthru = gimple_seq_may_fallthru (finally);
+ eh_else = get_eh_else (finally);
/* Duplicate the FINALLY block. Only need to do this for try-finally,
- and not for cleanups. */
- if (this_state)
+ and not for cleanups. If we've got an EH_ELSE, extract it now. */
+ if (eh_else)
+ {
+ finally = gimple_eh_else_e_body (eh_else);
+ gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
+ }
+ else if (this_state)
finally = lower_try_finally_dup_block (finally, outer_state);
+ finally_may_fallthru = gimple_seq_may_fallthru (finally);
/* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
@@ -1011,7 +1021,7 @@ lower_try_finally_nofallthru (struct leh_state *state,
struct leh_tf_state *tf)
{
tree lab;
- gimple x;
+ gimple x, eh_else;
gimple_seq finally;
struct goto_queue_node *q, *qe;
@@ -1034,15 +1044,35 @@ lower_try_finally_nofallthru (struct leh_state *state,
replace_goto_queue (tf);
- lower_eh_constructs_1 (state, finally);
- gimple_seq_add_seq (&tf->top_p_seq, finally);
+ /* Emit the finally block into the stream. Lower EH_ELSE at this time. */
+ eh_else = get_eh_else (finally);
+ if (eh_else)
+ {
+ finally = gimple_eh_else_n_body (eh_else);
+ lower_eh_constructs_1 (state, finally);
+ gimple_seq_add_seq (&tf->top_p_seq, finally);
- if (tf->may_throw)
+ if (tf->may_throw)
+ {
+ finally = gimple_eh_else_e_body (eh_else);
+ lower_eh_constructs_1 (state, finally);
+
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, finally);
+ }
+ }
+ else
{
- emit_post_landing_pad (&eh_seq, tf->region);
+ lower_eh_constructs_1 (state, finally);
+ gimple_seq_add_seq (&tf->top_p_seq, finally);
- x = gimple_build_goto (lab);
- gimple_seq_add_stmt (&eh_seq, x);
+ if (tf->may_throw)
+ {
+ emit_post_landing_pad (&eh_seq, tf->region);
+
+ x = gimple_build_goto (lab);
+ gimple_seq_add_stmt (&eh_seq, x);
+ }
}
}
@@ -1062,6 +1092,18 @@ lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
finally = gimple_try_cleanup (tf->top_p);
tf->top_p_seq = gimple_try_eval (tf->top_p);
+ /* Since there's only one destination, and the destination edge can only
+ either be EH or non-EH, that implies that all of our incoming edges
+ are of the same type. Therefore we can lower EH_ELSE immediately. */
+ x = get_eh_else (finally);
+ if (x)
+ {
+ if (tf->may_throw)
+ finally = gimple_eh_else_e_body (x);
+ else
+ finally = gimple_eh_else_n_body (x);
+ }
+
lower_eh_constructs_1 (state, finally);
if (tf->may_throw)
@@ -1132,11 +1174,18 @@ lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
gimple_seq finally;
gimple_seq new_stmt;
gimple_seq seq;
- gimple x;
+ gimple x, eh_else;
tree tmp;
location_t tf_loc = gimple_location (tf->try_finally_expr);
finally = gimple_try_cleanup (tf->top_p);
+
+ /* Notice EH_ELSE, and simplify some of the remaining code
+ by considering FINALLY to be the normal return path only. */
+ eh_else = get_eh_else (finally);
+ if (eh_else)
+ finally = gimple_eh_else_n_body (eh_else);
+
tf->top_p_seq = gimple_try_eval (tf->top_p);
new_stmt = NULL;
@@ -1153,7 +1202,12 @@ lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
if (tf->may_throw)
{
- seq = lower_try_finally_dup_block (finally, state);
+ /* We don't need to copy the EH path of EH_ELSE,
+ since it is only emitted once. */
+ if (eh_else)
+ seq = gimple_eh_else_e_body (eh_else);
+ else
+ seq = lower_try_finally_dup_block (finally, state);
lower_eh_constructs_1 (state, seq);
emit_post_landing_pad (&eh_seq, tf->region);
@@ -1252,7 +1306,7 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
tree last_case;
VEC (tree,heap) *case_label_vec;
gimple_seq switch_body;
- gimple x;
+ gimple x, eh_else;
tree tmp;
gimple switch_stmt;
gimple_seq finally;
@@ -1263,9 +1317,10 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
location_t finally_loc;
switch_body = gimple_seq_alloc ();
+ finally = gimple_try_cleanup (tf->top_p);
+ eh_else = get_eh_else (finally);
/* Mash the TRY block to the head of the chain. */
- finally = gimple_try_cleanup (tf->top_p);
tf->top_p_seq = gimple_try_eval (tf->top_p);
/* The location of the finally is either the last stmt in the finally
@@ -1281,7 +1336,7 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
nlabels = VEC_length (tree, tf->dest_array);
return_index = nlabels;
eh_index = return_index + tf->may_return;
- fallthru_index = eh_index + tf->may_throw;
+ fallthru_index = eh_index + (tf->may_throw && !eh_else);
ndests = fallthru_index + tf->may_fallthru;
finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
@@ -1319,7 +1374,23 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
gimple_seq_add_stmt (&switch_body, x);
}
- if (tf->may_throw)
+ /* For EH_ELSE, emit the exception path (plus resx) now, so that
+ subsequently we need only consider the normal path. */
+ if (eh_else)
+ {
+ if (tf->may_throw)
+ {
+ finally = gimple_eh_else_e_body (eh_else);
+ lower_eh_constructs_1 (state, finally);
+
+ emit_post_landing_pad (&eh_seq, tf->region);
+ gimple_seq_add_seq (&eh_seq, finally);
+ emit_resx (&eh_seq, tf->region);
+ }
+
+ finally = gimple_eh_else_n_body (eh_else);
+ }
+ else if (tf->may_throw)
{
emit_post_landing_pad (&eh_seq, tf->region);
@@ -1452,12 +1523,22 @@ lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
the estimate of the size of the switch machinery we'd have to add. */
static bool
-decide_copy_try_finally (int ndests, gimple_seq finally)
+decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
int f_estimate, sw_estimate;
+ gimple eh_else;
+
+ /* If there's an EH_ELSE involved, the exception path is separate
+ and really doesn't come into play for this computation. */
+ eh_else = get_eh_else (finally);
+ if (eh_else)
+ {
+ ndests -= may_throw;
+ finally = gimple_eh_else_n_body (eh_else);
+ }
if (!optimize)
- return false;
+ return ndests == 1;
/* Finally estimate N times, plus N gotos. */
f_estimate = count_insns_seq (finally, &eni_size_weights);
@@ -1563,7 +1644,8 @@ lower_try_finally (struct leh_state *state, gimple tp)
/* We can easily special-case redirection to a single destination. */
else if (ndests == 1)
lower_try_finally_onedest (state, &this_tf);
- else if (decide_copy_try_finally (ndests, gimple_try_cleanup (tp)))
+ else if (decide_copy_try_finally (ndests, this_tf.may_throw,
+ gimple_try_cleanup (tp)))
lower_try_finally_copy (state, &this_tf);
else
lower_try_finally_switch (state, &this_tf);
@@ -1928,6 +2010,9 @@ lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
case GIMPLE_EH_MUST_NOT_THROW:
replace = lower_eh_must_not_throw (state, stmt);
break;
+ case GIMPLE_EH_ELSE:
+ /* This code is only valid with GIMPLE_TRY_FINALLY. */
+ gcc_unreachable ();
default:
replace = lower_cleanup (state, stmt);
break;
@@ -1942,6 +2027,10 @@ lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
/* Return since we don't want gsi_next () */
return;
+ case GIMPLE_EH_ELSE:
+ /* We should be eliminating this in lower_try_finally et al. */
+ gcc_unreachable ();
+
default:
/* A type, a decl, or some kind of statement that we're not
interested in. Don't walk them. */
@@ -2832,6 +2921,10 @@ refactor_eh_r (gimple_seq seq)
case GIMPLE_EH_FILTER:
refactor_eh_r (gimple_eh_filter_failure (one));
break;
+ case GIMPLE_EH_ELSE:
+ refactor_eh_r (gimple_eh_else_n_body (one));
+ refactor_eh_r (gimple_eh_else_e_body (one));
+ break;
default:
break;
}
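
A minimal sketch of the pattern the lowering routines above share (select_eh_else_body is a hypothetical helper, not part of this patch): when the cleanup begins with a GIMPLE_EH_ELSE, its "N" body serves every normal-path exit and its "E" body is emitted exactly once on the exception path.

static gimple_seq
select_eh_else_body (gimple_seq finally, bool exceptional)
{
  gimple eh_else = get_eh_else (finally);

  if (eh_else)
    return exceptional ? gimple_eh_else_e_body (eh_else)
                       : gimple_eh_else_n_body (eh_else);

  /* No EH_ELSE: both kinds of exit share the same finally body.  */
  return finally;
}
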
diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h
index dcfbb9da628..211c1079f3c 100644
--- a/gcc/tree-flow.h
+++ b/gcc/tree-flow.h
@@ -33,6 +33,14 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-alias.h"
+/* This structure is used to map a gimple statement to a label,
+ or list of labels to represent transaction restart. */
+
+struct GTY(()) tm_restart_node {
+ gimple stmt;
+ tree label_or_list;
+};
+
/* Gimple dataflow datastructure. All publicly available fields shall have
gimple_ accessor defined in tree-flow-inline.h, all publicly modifiable
fields should have gimple_set accessor. */
@@ -80,6 +88,10 @@ struct GTY(()) gimple_df {
unsigned int ipa_pta : 1;
struct ssa_operands ssa_operands;
+
+ /* Map gimple stmt to tree label (or list of labels) for transaction
+ restart and abort. */
+ htab_t GTY ((param_is (struct tm_restart_node))) tm_restart;
};
/* Accessors for internal use only. Generic code should use abstraction
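
The new tm_restart map is keyed on the gimple statement pointer that opens each tm_restart_node; the struct_ptr_hash/struct_ptr_eq helpers added to tree.h later in this patch are the matching callbacks. A hedged lookup sketch (lookup_tm_restart is hypothetical; the real users live in trans-mem.c):

static struct tm_restart_node *
lookup_tm_restart (htab_t tm_restart, gimple stmt)
{
  struct tm_restart_node dummy;

  /* Only the leading pointer takes part in hashing and comparison.  */
  dummy.stmt = stmt;
  return (struct tm_restart_node *) htab_find (tm_restart, &dummy);
}
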
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 11be8d0791f..4ca4fa464f9 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -1365,6 +1365,12 @@ remap_gimple_stmt (gimple stmt, copy_body_data *id)
= gimple_build_omp_critical (s1, gimple_omp_critical_name (stmt));
break;
+ case GIMPLE_TRANSACTION:
+ s1 = remap_gimple_seq (gimple_transaction_body (stmt), id);
+ copy = gimple_build_transaction (s1, gimple_transaction_label (stmt));
+ gimple_transaction_set_subcode (copy, gimple_transaction_subcode (stmt));
+ break;
+
default:
gcc_unreachable ();
}
@@ -3600,6 +3606,11 @@ estimate_num_insns (gimple stmt, eni_weights *weights)
return (weights->omp_cost
+ estimate_num_insns_seq (gimple_omp_body (stmt), weights));
+ case GIMPLE_TRANSACTION:
+ return (weights->tm_cost
+ + estimate_num_insns_seq (gimple_transaction_body (stmt),
+ weights));
+
default:
gcc_unreachable ();
}
@@ -3639,6 +3650,7 @@ init_inline_once (void)
eni_size_weights.target_builtin_call_cost = 1;
eni_size_weights.div_mod_cost = 1;
eni_size_weights.omp_cost = 40;
+ eni_size_weights.tm_cost = 10;
eni_size_weights.time_based = false;
eni_size_weights.return_cost = 1;
@@ -3650,6 +3662,7 @@ init_inline_once (void)
eni_time_weights.target_builtin_call_cost = 1;
eni_time_weights.div_mod_cost = 10;
eni_time_weights.omp_cost = 40;
+ eni_time_weights.tm_cost = 40;
eni_time_weights.time_based = true;
eni_time_weights.return_cost = 2;
}
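
With these weights a GIMPLE_TRANSACTION is charged a flat overhead on top of its body: a transaction whose body is counted at, say, 25 instructions is estimated at 10 + 25 = 35 for size and 40 + 25 = 65 for time, the constant presumably covering the begin/commit machinery rather than per-access instrumentation.
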
@@ -4041,9 +4054,7 @@ expand_call_inline (basic_block bb, gimple stmt, copy_body_data *id)
/* Expand call statements reachable from STMT_P.
We can only have CALL_EXPRs as the "toplevel" tree code or nested
- in a MODIFY_EXPR. See gimple.c:get_call_expr_in(). We can
- unfortunately not use that function here because we need a pointer
- to the CALL_EXPR, not the tree itself. */
+ in a MODIFY_EXPR. */
static bool
gimple_expand_calls_inline (basic_block bb, copy_body_data *id)
diff --git a/gcc/tree-inline.h b/gcc/tree-inline.h
index fb039e3194c..2aac5f8b9d3 100644
--- a/gcc/tree-inline.h
+++ b/gcc/tree-inline.h
@@ -144,6 +144,9 @@ typedef struct eni_weights_d
/* Cost for omp construct. */
unsigned omp_cost;
+ /* Cost for tm transaction. */
+ unsigned tm_cost;
+
/* Cost of return. */
unsigned return_cost;
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index df1e24c7906..cc8847e8dff 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -447,6 +447,12 @@ extern struct gimple_opt_pass pass_build_cgraph_edges;
extern struct gimple_opt_pass pass_local_pure_const;
extern struct gimple_opt_pass pass_tracer;
extern struct gimple_opt_pass pass_warn_unused_result;
+extern struct gimple_opt_pass pass_diagnose_tm_blocks;
+extern struct gimple_opt_pass pass_lower_tm;
+extern struct gimple_opt_pass pass_tm_init;
+extern struct gimple_opt_pass pass_tm_mark;
+extern struct gimple_opt_pass pass_tm_memopt;
+extern struct gimple_opt_pass pass_tm_edges;
extern struct gimple_opt_pass pass_split_functions;
extern struct gimple_opt_pass pass_feedback_split_functions;
@@ -469,6 +475,7 @@ extern struct ipa_opt_pass_d pass_ipa_pure_const;
extern struct simple_ipa_opt_pass pass_ipa_pta;
extern struct ipa_opt_pass_d pass_ipa_lto_wpa_fixup;
extern struct ipa_opt_pass_d pass_ipa_lto_finish_out;
+extern struct simple_ipa_opt_pass pass_ipa_tm;
extern struct ipa_opt_pass_d pass_ipa_profile;
extern struct ipa_opt_pass_d pass_ipa_cdtor_merge;
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 24d70c3ff38..d0546c6c1ca 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -2264,6 +2264,26 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
is_expr = false;
break;
+ case TRANSACTION_EXPR:
+ if (TRANSACTION_EXPR_OUTER (node))
+ pp_string (buffer, "__transaction_atomic [[outer]]");
+ else if (TRANSACTION_EXPR_RELAXED (node))
+ pp_string (buffer, "__transaction_relaxed");
+ else
+ pp_string (buffer, "__transaction_atomic");
+ if (!(flags & TDF_SLIM) && TRANSACTION_EXPR_BODY (node))
+ {
+ newline_and_indent (buffer, spc);
+ pp_character (buffer, '{');
+ newline_and_indent (buffer, spc + 2);
+ dump_generic_node (buffer, TRANSACTION_EXPR_BODY (node),
+ spc + 2, flags, false);
+ newline_and_indent (buffer, spc);
+ pp_character (buffer, '}');
+ }
+ is_expr = false;
+ break;
+
case REDUC_MAX_EXPR:
pp_string (buffer, " REDUC_MAX_EXPR < ");
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false);
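
For reference, the new TRANSACTION_EXPR case produces dumps of the following shape, where the header is one of the three strings above and <body> stands for whatever dump_generic_node prints at spc + 2:

__transaction_atomic [[outer]]
  {
    <body>
  }

with "__transaction_relaxed" or plain "__transaction_atomic" substituted when the RELAXED flag is set or neither flag is.
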
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index 57fc7341c54..cd222093579 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -1182,6 +1182,8 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
case BUILT_IN_MEMPCPY:
case BUILT_IN_STPCPY:
case BUILT_IN_STPNCPY:
+ case BUILT_IN_TM_MEMCPY:
+ case BUILT_IN_TM_MEMMOVE:
{
ao_ref dref;
tree size = NULL_TREE;
@@ -1228,6 +1230,32 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
size);
return refs_may_alias_p_1 (&dref, ref, false);
}
+
+ /* The following functions read memory pointed to by their
+ first argument. */
+ CASE_BUILT_IN_TM_LOAD (1):
+ CASE_BUILT_IN_TM_LOAD (2):
+ CASE_BUILT_IN_TM_LOAD (4):
+ CASE_BUILT_IN_TM_LOAD (8):
+ CASE_BUILT_IN_TM_LOAD (FLOAT):
+ CASE_BUILT_IN_TM_LOAD (DOUBLE):
+ CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+ CASE_BUILT_IN_TM_LOAD (M64):
+ CASE_BUILT_IN_TM_LOAD (M128):
+ CASE_BUILT_IN_TM_LOAD (M256):
+ case BUILT_IN_TM_LOG:
+ case BUILT_IN_TM_LOG_1:
+ case BUILT_IN_TM_LOG_2:
+ case BUILT_IN_TM_LOG_4:
+ case BUILT_IN_TM_LOG_8:
+ case BUILT_IN_TM_LOG_FLOAT:
+ case BUILT_IN_TM_LOG_DOUBLE:
+ case BUILT_IN_TM_LOG_LDOUBLE:
+ case BUILT_IN_TM_LOG_M64:
+ case BUILT_IN_TM_LOG_M128:
+ case BUILT_IN_TM_LOG_M256:
+ return ptr_deref_may_alias_ref_p_1 (gimple_call_arg (call, 0), ref);
+
/* These read memory pointed to by the first argument. */
case BUILT_IN_STRDUP:
case BUILT_IN_STRNDUP:
@@ -1250,6 +1278,7 @@ ref_maybe_used_by_call_p_1 (gimple call, ao_ref *ref)
case BUILT_IN_STACK_SAVE:
case BUILT_IN_STACK_RESTORE:
case BUILT_IN_MEMSET:
+ case BUILT_IN_TM_MEMSET:
case BUILT_IN_MEMSET_CHK:
case BUILT_IN_FREXP:
case BUILT_IN_FREXPF:
@@ -1480,6 +1509,19 @@ call_may_clobber_ref_p_1 (gimple call, ao_ref *ref)
case BUILT_IN_STRCAT:
case BUILT_IN_STRNCAT:
case BUILT_IN_MEMSET:
+ case BUILT_IN_TM_MEMSET:
+ CASE_BUILT_IN_TM_STORE (1):
+ CASE_BUILT_IN_TM_STORE (2):
+ CASE_BUILT_IN_TM_STORE (4):
+ CASE_BUILT_IN_TM_STORE (8):
+ CASE_BUILT_IN_TM_STORE (FLOAT):
+ CASE_BUILT_IN_TM_STORE (DOUBLE):
+ CASE_BUILT_IN_TM_STORE (LDOUBLE):
+ CASE_BUILT_IN_TM_STORE (M64):
+ CASE_BUILT_IN_TM_STORE (M128):
+ CASE_BUILT_IN_TM_STORE (M256):
+ case BUILT_IN_TM_MEMCPY:
+ case BUILT_IN_TM_MEMMOVE:
{
ao_ref dref;
tree size = NULL_TREE;
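
These entries exist because, inside a transaction, ordinary loads and stores are rewritten into calls to the TM builtins with the accessed address as the first argument, so the alias oracle has to treat such a call like the memory access it replaces. A hedged C-level illustration (pseudocode names; the real calls are the BUILT_IN_TM_LOAD_* / BUILT_IN_TM_STORE_* builtins selected by access size and mode):

__transaction_atomic { counter += *p; }

/* is instrumented roughly as:
     tmp1 = TM_LOAD_4 (p);                   reads *p
     tmp2 = TM_LOAD_4 (&counter);            reads counter
     TM_STORE_4 (&counter, tmp1 + tmp2);     writes counter  */
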
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 51f2aa1bf7c..fe0c4e9c389 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -4024,6 +4024,8 @@ find_func_aliases_for_builtin_call (gimple t)
case BUILT_IN_STPCPY_CHK:
case BUILT_IN_STRCAT_CHK:
case BUILT_IN_STRNCAT_CHK:
+ case BUILT_IN_TM_MEMCPY:
+ case BUILT_IN_TM_MEMMOVE:
{
tree res = gimple_call_lhs (t);
tree dest = gimple_call_arg (t, (DECL_FUNCTION_CODE (fndecl)
@@ -4056,6 +4058,7 @@ find_func_aliases_for_builtin_call (gimple t)
}
case BUILT_IN_MEMSET:
case BUILT_IN_MEMSET_CHK:
+ case BUILT_IN_TM_MEMSET:
{
tree res = gimple_call_lhs (t);
tree dest = gimple_call_arg (t, 0);
@@ -4197,6 +4200,50 @@ find_func_aliases_for_builtin_call (gimple t)
}
return true;
}
+ CASE_BUILT_IN_TM_STORE (1):
+ CASE_BUILT_IN_TM_STORE (2):
+ CASE_BUILT_IN_TM_STORE (4):
+ CASE_BUILT_IN_TM_STORE (8):
+ CASE_BUILT_IN_TM_STORE (FLOAT):
+ CASE_BUILT_IN_TM_STORE (DOUBLE):
+ CASE_BUILT_IN_TM_STORE (LDOUBLE):
+ CASE_BUILT_IN_TM_STORE (M64):
+ CASE_BUILT_IN_TM_STORE (M128):
+ CASE_BUILT_IN_TM_STORE (M256):
+ {
+ tree addr = gimple_call_arg (t, 0);
+ tree src = gimple_call_arg (t, 1);
+
+ get_constraint_for (addr, &lhsc);
+ do_deref (&lhsc);
+ get_constraint_for (src, &rhsc);
+ process_all_all_constraints (lhsc, rhsc);
+ VEC_free (ce_s, heap, lhsc);
+ VEC_free (ce_s, heap, rhsc);
+ return true;
+ }
+ CASE_BUILT_IN_TM_LOAD (1):
+ CASE_BUILT_IN_TM_LOAD (2):
+ CASE_BUILT_IN_TM_LOAD (4):
+ CASE_BUILT_IN_TM_LOAD (8):
+ CASE_BUILT_IN_TM_LOAD (FLOAT):
+ CASE_BUILT_IN_TM_LOAD (DOUBLE):
+ CASE_BUILT_IN_TM_LOAD (LDOUBLE):
+ CASE_BUILT_IN_TM_LOAD (M64):
+ CASE_BUILT_IN_TM_LOAD (M128):
+ CASE_BUILT_IN_TM_LOAD (M256):
+ {
+ tree dest = gimple_call_lhs (t);
+ tree addr = gimple_call_arg (t, 0);
+
+ get_constraint_for (dest, &lhsc);
+ get_constraint_for (addr, &rhsc);
+ do_deref (&rhsc);
+ process_all_all_constraints (lhsc, rhsc);
+ VEC_free (ce_s, heap, lhsc);
+ VEC_free (ce_s, heap, rhsc);
+ return true;
+ }
/* Variadic argument handling needs to be handled in IPA
mode as well. */
case BUILT_IN_VA_START:
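
In constraint terms the new cases mirror assignments through a pointer: a TM store with arguments (addr, src) contributes *addr = src, so whatever src points to flows into the pointed-to object, and a TM load with result lhs and argument addr contributes lhs = *addr, so the result inherits the points-to set of the dereferenced location, exactly as for the uninstrumented accesses.
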
diff --git a/gcc/tree.c b/gcc/tree.c
index 2cbd68b5fd9..ba6c2e1ef8a 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -9428,6 +9428,8 @@ local_define_builtin (const char *name, tree type, enum built_in_function code,
if (ecf_flags & ECF_LEAF)
DECL_ATTRIBUTES (decl) = tree_cons (get_identifier ("leaf"),
NULL, DECL_ATTRIBUTES (decl));
+ if ((ecf_flags & ECF_TM_PURE) && flag_tm)
+ apply_tm_attr (decl, get_identifier ("transaction_pure"));
set_builtin_decl (code, decl, true);
}
@@ -9593,7 +9595,8 @@ build_common_builtin_nodes (void)
ftype = build_function_type_list (ptr_type_node,
integer_type_node, NULL_TREE);
local_define_builtin ("__builtin_eh_pointer", ftype, BUILT_IN_EH_POINTER,
- "__builtin_eh_pointer", ECF_PURE | ECF_NOTHROW | ECF_LEAF);
+ "__builtin_eh_pointer",
+ ECF_PURE | ECF_NOTHROW | ECF_LEAF | ECF_TM_PURE);
tmp = lang_hooks.types.type_for_mode (targetm.eh_return_filter_mode (), 0);
ftype = build_function_type_list (tmp, integer_type_node, NULL_TREE);
@@ -11142,6 +11145,37 @@ tree_strip_sign_nop_conversions (tree exp)
return exp;
}
+/* Strip out all handled components that produce invariant
+ offsets. */
+
+const_tree
+strip_invariant_refs (const_tree op)
+{
+ while (handled_component_p (op))
+ {
+ switch (TREE_CODE (op))
+ {
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ if (!is_gimple_constant (TREE_OPERAND (op, 1))
+ || TREE_OPERAND (op, 2) != NULL_TREE
+ || TREE_OPERAND (op, 3) != NULL_TREE)
+ return NULL;
+ break;
+
+ case COMPONENT_REF:
+ if (TREE_OPERAND (op, 2) != NULL_TREE)
+ return NULL;
+ break;
+
+ default:;
+ }
+ op = TREE_OPERAND (op, 0);
+ }
+
+ return op;
+}
+
static GTY(()) tree gcc_eh_personality_decl;
/* Return the GCC personality function decl. */
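
For example, the new helper strips a reference such as s.a[3].b down to the underlying s, since every component has an invariant offset, whereas s.a[i].b with a non-constant index fails the ARRAY_REF check and yields NULL.
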
diff --git a/gcc/tree.def b/gcc/tree.def
index 77dc7d7489a..2a2363e7037 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -1076,6 +1076,10 @@ DEFTREECODE (OMP_ATOMIC_CAPTURE_NEW, "omp_atomic_capture_new", tcc_statement, 2)
/* OpenMP clauses. */
DEFTREECODE (OMP_CLAUSE, "omp_clause", tcc_exceptional, 0)
+/* TRANSACTION_EXPR tree code.
+ Operand 0: BODY: contains the body of the transaction. */
+DEFTREECODE (TRANSACTION_EXPR, "transaction_expr", tcc_expression, 1)
+
/* Reduction operations.
Operations that take a vector of elements and "reduce" it to a scalar
result (e.g. summing the elements of the vector, finding the minimum over
diff --git a/gcc/tree.h b/gcc/tree.h
index 00b663726b8..3e1e225bd4e 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -539,6 +539,9 @@ struct GTY(()) tree_common {
ENUM_IS_SCOPED in
ENUMERAL_TYPE
+ TRANSACTION_EXPR_OUTER in
+ TRANSACTION_EXPR
+
public_flag:
TREE_OVERFLOW in
@@ -566,6 +569,9 @@ struct GTY(()) tree_common {
OMP_CLAUSE_PRIVATE_DEBUG in
OMP_CLAUSE_PRIVATE
+ TRANSACTION_EXPR_RELAXED in
+ TRANSACTION_EXPR
+
private_flag:
TREE_PRIVATE in
@@ -1809,6 +1815,14 @@ extern void protected_set_expr_location (tree, location_t);
#define CALL_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (CALL_EXPR_CHECK (NODE), 0)) + 3)
+/* TM directives and accessors. */
+#define TRANSACTION_EXPR_BODY(NODE) \
+ TREE_OPERAND (TRANSACTION_EXPR_CHECK (NODE), 0)
+#define TRANSACTION_EXPR_OUTER(NODE) \
+ (TRANSACTION_EXPR_CHECK (NODE)->base.static_flag)
+#define TRANSACTION_EXPR_RELAXED(NODE) \
+ (TRANSACTION_EXPR_CHECK (NODE)->base.public_flag)
+
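
A hedged sketch of how a front end might build and flag one of these nodes (build1, void_type_node and SET_EXPR_LOCATION are pre-existing tree APIs; the concrete construction in the C and C++ parsers may differ):

  tree stmt = build1 (TRANSACTION_EXPR, void_type_node, body);
  TRANSACTION_EXPR_RELAXED (stmt) = 1;    /* __transaction_relaxed */
  SET_EXPR_LOCATION (stmt, loc);
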
/* OpenMP directive and clause accessors. */
#define OMP_BODY(NODE) \
@@ -3455,6 +3469,29 @@ struct GTY(())
#define DECL_NO_INLINE_WARNING_P(NODE) \
(FUNCTION_DECL_CHECK (NODE)->function_decl.no_inline_warning_flag)
+/* Nonzero if a FUNCTION_CODE is a TM load/store. */
+#define BUILTIN_TM_LOAD_STORE_P(FN) \
+ ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+/* Nonzero if a FUNCTION_CODE is a TM load. */
+#define BUILTIN_TM_LOAD_P(FN) \
+ ((FN) >= BUILT_IN_TM_LOAD_1 && (FN) <= BUILT_IN_TM_LOAD_RFW_LDOUBLE)
+
+/* Nonzero if a FUNCTION_CODE is a TM store. */
+#define BUILTIN_TM_STORE_P(FN) \
+ ((FN) >= BUILT_IN_TM_STORE_1 && (FN) <= BUILT_IN_TM_STORE_WAW_LDOUBLE)
+
+#define CASE_BUILT_IN_TM_LOAD(FN) \
+ case BUILT_IN_TM_LOAD_##FN: \
+ case BUILT_IN_TM_LOAD_RAR_##FN: \
+ case BUILT_IN_TM_LOAD_RAW_##FN: \
+ case BUILT_IN_TM_LOAD_RFW_##FN
+
+#define CASE_BUILT_IN_TM_STORE(FN) \
+ case BUILT_IN_TM_STORE_##FN: \
+ case BUILT_IN_TM_STORE_WAR_##FN: \
+ case BUILT_IN_TM_STORE_WAW_##FN
+
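
For reference, each of these macros expands to the full set of access variants for one size or mode, e.g.

  CASE_BUILT_IN_TM_LOAD (4):

expands to

  case BUILT_IN_TM_LOAD_4:
  case BUILT_IN_TM_LOAD_RAR_4:
  case BUILT_IN_TM_LOAD_RAW_4:
  case BUILT_IN_TM_LOAD_RFW_4:

covering the specialized read variants declared in gtm-builtins.def.
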
/* Nonzero in a FUNCTION_DECL that should be always inlined by the inliner
disregarding size and cost heuristics. This is equivalent to using
the always_inline attribute without the required diagnostics if the
@@ -3542,8 +3579,9 @@ struct GTY(()) tree_function_decl {
unsigned pure_flag : 1;
unsigned looping_const_or_pure_flag : 1;
unsigned has_debug_args_flag : 1;
+ unsigned tm_clone_flag : 1;
- /* 2 bits left */
+ /* 1 bit left */
};
/* The source language of the translation-unit. */
@@ -5153,6 +5191,7 @@ extern bool auto_var_in_fn_p (const_tree, const_tree);
extern tree build_low_bits_mask (tree, unsigned);
extern tree tree_strip_nop_conversions (tree);
extern tree tree_strip_sign_nop_conversions (tree);
+extern const_tree strip_invariant_refs (const_tree);
extern tree lhd_gcc_personality (void);
extern void assign_assembler_name_if_neeeded (tree);
extern void warn_deprecated_use (tree, tree);
@@ -5178,6 +5217,25 @@ extern void expand_return (tree);
/* In tree-eh.c */
extern void using_eh_for_cleanups (void);
+/* Compare and hash for any structure which begins with a canonical
+ pointer. Assumes all pointers are interchangeable, which is sort
+ of already assumed by gcc elsewhere IIRC. */
+
+static inline int
+struct_ptr_eq (const void *a, const void *b)
+{
+ const void * const * x = (const void * const *) a;
+ const void * const * y = (const void * const *) b;
+ return *x == *y;
+}
+
+static inline hashval_t
+struct_ptr_hash (const void *a)
+{
+ const void * const * x = (const void * const *) a;
+ return (intptr_t)*x >> 4;
+}
+
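
These two helpers let any record whose first member is a pointer serve directly as a hash-table key. A hedged creation sketch for the tm_restart table declared in tree-flow.h above (the exact call in trans-mem.c may differ):

  cfun->gimple_df->tm_restart
    = htab_create_ggc (31, struct_ptr_hash, struct_ptr_eq, ggc_free);
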
/* In fold-const.c */
/* Non-zero if we are folding constants inside an initializer; zero
@@ -5546,6 +5604,10 @@ extern tree build_duplicate_type (tree);
#define ECF_NOVOPS (1 << 9)
/* The function does not lead to calls within current function unit. */
#define ECF_LEAF (1 << 10)
+/* Nonzero if this call does not affect transactions. */
+#define ECF_TM_PURE (1 << 11)
+/* Nonzero if this call is into the transaction runtime library. */
+#define ECF_TM_BUILTIN (1 << 12)
extern int flags_from_decl_or_type (const_tree);
extern int call_expr_flags (const_tree);
@@ -5596,6 +5658,8 @@ extern void init_attributes (void);
a decl attribute to the declaration rather than to its type). */
extern tree decl_attributes (tree *, tree, int);
+extern void apply_tm_attr (tree, tree);
+
/* In integrate.c */
extern void set_decl_abstract_flags (tree, int);
extern void set_decl_origin_self (tree);
@@ -5808,6 +5872,21 @@ extern unsigned HOST_WIDE_INT compute_builtin_object_size (tree, int);
extern unsigned HOST_WIDE_INT highest_pow2_factor (const_tree);
extern tree build_personality_function (const char *);
+/* In trans-mem.c. */
+extern tree build_tm_abort_call (location_t, bool);
+extern bool is_tm_safe (const_tree);
+extern bool is_tm_pure (const_tree);
+extern bool is_tm_may_cancel_outer (tree);
+extern bool is_tm_ending_fndecl (tree);
+extern void record_tm_replacement (tree, tree);
+extern void tm_malloc_replacement (tree);
+
+static inline bool
+is_tm_safe_or_pure (const_tree x)
+{
+ return is_tm_safe (x) || is_tm_pure (x);
+}
+
/* In tree-inline.c. */
void init_inline_once (void);
diff --git a/gcc/varasm.c b/gcc/varasm.c
index d0de2136bd8..ed27dce7b0a 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -5859,6 +5859,162 @@ assemble_alias (tree decl, tree target)
}
}
+/* Record and output a table of translations from original function
+ to its transaction aware clone. Note that tm_pure functions are
+ considered to be their own clone. */
+
+static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
+ htab_t tm_clone_hash;
+
+void
+record_tm_clone_pair (tree o, tree n)
+{
+ struct tree_map **slot, *h;
+
+ if (tm_clone_hash == NULL)
+ tm_clone_hash = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
+
+ h = ggc_alloc_tree_map ();
+ h->hash = htab_hash_pointer (o);
+ h->base.from = o;
+ h->to = n;
+
+ slot = (struct tree_map **)
+ htab_find_slot_with_hash (tm_clone_hash, h, h->hash, INSERT);
+ *slot = h;
+}
+
+tree
+get_tm_clone_pair (tree o)
+{
+ if (tm_clone_hash)
+ {
+ struct tree_map *h, in;
+
+ in.base.from = o;
+ in.hash = htab_hash_pointer (o);
+ h = (struct tree_map *) htab_find_with_hash (tm_clone_hash,
+ &in, in.hash);
+ if (h)
+ return h->to;
+ }
+ return NULL_TREE;
+}
+
+typedef struct tm_alias_pair
+{
+ unsigned int uid;
+ tree from;
+ tree to;
+} tm_alias_pair;
+
+DEF_VEC_O(tm_alias_pair);
+DEF_VEC_ALLOC_O(tm_alias_pair,heap);
+
+/* Helper function for finish_tm_clone_pairs. Dump a hash table entry
+ into a VEC in INFO. */
+
+static int
+dump_tm_clone_to_vec (void **slot, void *info)
+{
+ struct tree_map *map = (struct tree_map *) *slot;
+ VEC(tm_alias_pair,heap) **tm_alias_pairs
+ = (VEC(tm_alias_pair, heap) **) info;
+ tm_alias_pair *p;
+
+ p = VEC_safe_push (tm_alias_pair, heap, *tm_alias_pairs, NULL);
+ p->from = map->base.from;
+ p->to = map->to;
+ p->uid = DECL_UID (p->from);
+ return 1;
+}
+
+/* Dump the actual pairs to the .tm_clone_table section. */
+
+static void
+dump_tm_clone_pairs (VEC(tm_alias_pair,heap) *tm_alias_pairs)
+{
+ unsigned i;
+ tm_alias_pair *p;
+ bool switched = false;
+
+ FOR_EACH_VEC_ELT (tm_alias_pair, tm_alias_pairs, i, p)
+ {
+ tree src = p->from;
+ tree dst = p->to;
+ struct cgraph_node *src_n = cgraph_get_node (src);
+ struct cgraph_node *dst_n = cgraph_get_node (dst);
+
+ /* The function ipa_tm_create_version() marks the clone as needed if
+ the original function was needed. But we also mark the clone as
+ needed if we ever called the clone indirectly through
+ TM_GETTMCLONE. If neither of these are true, we didn't generate
+ a clone, and we didn't call it indirectly... no sense keeping it
+ in the clone table. */
+ if (!dst_n || !dst_n->needed)
+ continue;
+
+ /* This covers the case where we have optimized the original
+ function away, and only access the transactional clone. */
+ if (!src_n || !src_n->needed)
+ continue;
+
+ if (!switched)
+ {
+ switch_to_section (get_named_section (NULL, ".tm_clone_table", 3));
+ assemble_align (POINTER_SIZE);
+ switched = true;
+ }
+
+ assemble_integer (XEXP (DECL_RTL (src), 0),
+ POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+ assemble_integer (XEXP (DECL_RTL (dst), 0),
+ POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+ }
+}
+
+/* Helper comparison function for qsorting by the DECL_UID stored in
+ tm_alias_pair->uid. */
+
+static int
+tm_alias_pair_cmp (const void *x, const void *y)
+{
+ const tm_alias_pair *p1 = (const tm_alias_pair *) x;
+ const tm_alias_pair *p2 = (const tm_alias_pair *) y;
+ if (p1->uid < p2->uid)
+ return -1;
+ if (p1->uid > p2->uid)
+ return 1;
+ return 0;
+}
+
+void
+finish_tm_clone_pairs (void)
+{
+ VEC(tm_alias_pair,heap) *tm_alias_pairs = NULL;
+
+ if (tm_clone_hash == NULL)
+ return;
+
+ /* We need a deterministic order for the .tm_clone_table, otherwise
+ we will get bootstrap comparison failures, so dump the hash table
+ to a vector, sort it, and dump the vector. */
+
+ /* Dump the hashtable to a vector. */
+ htab_traverse_noresize (tm_clone_hash, dump_tm_clone_to_vec,
+ (void *) &tm_alias_pairs);
+ /* Sort it. */
+ VEC_qsort (tm_alias_pair, tm_alias_pairs, tm_alias_pair_cmp);
+
+ /* Dump it. */
+ dump_tm_clone_pairs (tm_alias_pairs);
+
+ htab_delete (tm_clone_hash);
+ tm_clone_hash = NULL;
+ VEC_free (tm_alias_pair, heap, tm_alias_pairs);
+}
+
+
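
The emitted section is just an array of pointer pairs, sorted by DECL_UID so the output is reproducible across bootstrap stages. On a 64-bit target the result would look roughly like this (symbol names are hypothetical; the clone symbols themselves are produced elsewhere in the branch):

	.section	.tm_clone_table
	.align	8
	.quad	foo		# original function
	.quad	foo.tm_clone	# its transactional clone
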
/* Emit an assembler directive to set symbol for DECL visibility to
the visibility type VIS, which must not be VISIBILITY_DEFAULT. */