/* Vectorizer
   Copyright (C) 2003-2015 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Loop and basic block vectorizer.

  This file contains drivers for the three vectorizers:
  (1) loop vectorizer (inter-iteration parallelism),
  (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
      vectorizer)
  (3) BB vectorizer (out-of-loops), aka SLP

  The rest of the vectorizer's code is organized as follows:
  - tree-vect-loop.c - loop specific parts such as reductions, etc.  These are
    used by drivers (1) and (2).
  - tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by
    drivers (1) and (2).
  - tree-vect-slp.c - BB vectorization specific analysis and transformation,
    used by drivers (2) and (3).
  - tree-vect-stmts.c - statements analysis and transformation (used by all).
  - tree-vect-data-refs.c - vectorizer specific data-refs analysis and
    manipulations (used by all).
  - tree-vect-patterns.c - vectorizable code patterns detector (used by all)

  Here's a poor attempt at illustrating that:

     loop_vect()  loop_aware_slp()  slp_vect()
          \             |              /
           \            |             /
        tree-vect-loop.c      tree-vect-slp.c
                  \                /
                   \              /
        tree-vect-stmts.c  tree-vect-data-refs.c
*/
#include "coretypes.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "tree-pretty-print.h"
#include "hard-reg-set.h"
#include "dominance.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "gimple-ssa.h"
#include "plugin-api.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "tree-ssa-loop-manip.h"
#include "tree-vectorizer.h"
#include "tree-pass.h"
#include "tree-ssa-propagate.h"
#include "gimple-fold.h"
#include "tree-scalar-evolution.h"
/* Loop or bb location.  */
source_location vect_location;

/* Vector mapping GIMPLE stmt to stmt_vec_info.  */
vec<vec_void_p> stmt_vec_info_vec;
/* For mapping simduid to vectorization factor.  */

struct simduid_to_vf : typed_free_remove<simduid_to_vf>
{
  unsigned int simduid;
  int vf;

  /* hash_table support.  */
  typedef simduid_to_vf *value_type;
  typedef simduid_to_vf *compare_type;
  static inline hashval_t hash (const simduid_to_vf *);
  static inline int equal (const simduid_to_vf *, const simduid_to_vf *);
};

inline hashval_t
simduid_to_vf::hash (const simduid_to_vf *p)
{
  return p->simduid;
}

inline int
simduid_to_vf::equal (const simduid_to_vf *p1, const simduid_to_vf *p2)
{
  return p1->simduid == p2->simduid;
}
/* This hash maps the OMP simd array to the corresponding simduid used
   to index into it.  Like thus,

        _7 = GOMP_SIMD_LANE (simduid.0)
        ...
        ...
        D.1737[_7] = stuff;

   This hash maps from the OMP simd array (D.1737[]) to DECL_UID of
   simduid.0.  */
struct simd_array_to_simduid : typed_free_remove<simd_array_to_simduid>
{
  tree decl;
  unsigned int simduid;

  /* hash_table support.  */
  typedef simd_array_to_simduid *value_type;
  typedef simd_array_to_simduid *compare_type;
  static inline hashval_t hash (const simd_array_to_simduid *);
  static inline int equal (const simd_array_to_simduid *,
                           const simd_array_to_simduid *);
};

inline hashval_t
simd_array_to_simduid::hash (const simd_array_to_simduid *p)
{
  return DECL_UID (p->decl);
}

inline int
simd_array_to_simduid::equal (const simd_array_to_simduid *p1,
                              const simd_array_to_simduid *p2)
{
  return p1->decl == p2->decl;
}
/* Fold IFN_GOMP_SIMD_LANE, IFN_GOMP_SIMD_VF and IFN_GOMP_SIMD_LAST_LANE
   into their corresponding constants.  */
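/* For illustration only (a rough sketch of the intended effect; the SSA
   names below are made up, not taken from a real dump): assuming a simd
   loop whose recorded vectorization factor is 8,

       _5 = GOMP_SIMD_VF (simduid.0);
       _7 = GOMP_SIMD_LANE (simduid.0);
       _9 = GOMP_SIMD_LAST_LANE (simduid.0, _3);

   would be folded to the constant 8, the constant 0, and the second
   argument _3, respectively.  If no vectorization factor was recorded
   (HTAB is NULL or has no entry), a factor of 1 is assumed.  */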
static void
adjust_simduid_builtins (hash_table<simduid_to_vf> *htab)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          unsigned int vf = 1;
          enum internal_fn ifn;
          gimple stmt = gsi_stmt (i);
          tree t;
          if (!is_gimple_call (stmt)
              || !gimple_call_internal_p (stmt))
            continue;
          ifn = gimple_call_internal_fn (stmt);
          switch (ifn)
            {
            case IFN_GOMP_SIMD_LANE:
            case IFN_GOMP_SIMD_VF:
            case IFN_GOMP_SIMD_LAST_LANE:
              break;
            default:
              continue;
            }
          tree arg = gimple_call_arg (stmt, 0);
          gcc_assert (arg != NULL_TREE);
          gcc_assert (TREE_CODE (arg) == SSA_NAME);
          simduid_to_vf *p = NULL, data;
          data.simduid = DECL_UID (SSA_NAME_VAR (arg));
          if (htab)
            {
              p = htab->find (&data);
              if (p)
                vf = p->vf;
            }
          switch (ifn)
            {
            case IFN_GOMP_SIMD_VF:
              t = build_int_cst (unsigned_type_node, vf);
              break;
            case IFN_GOMP_SIMD_LANE:
              t = build_int_cst (unsigned_type_node, 0);
              break;
            case IFN_GOMP_SIMD_LAST_LANE:
              t = gimple_call_arg (stmt, 1);
              break;
            default:
              gcc_unreachable ();
            }
          update_call_from_tree (&i, t);
        }
    }
}
/* Helper structure for note_simd_array_uses.  */

struct note_simd_array_uses_struct
{
  hash_table<simd_array_to_simduid> **htab;
  unsigned int simduid;
};
/* Callback for note_simd_array_uses, called through walk_gimple_op.  */

static tree
note_simd_array_uses_cb (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct note_simd_array_uses_struct *ns
    = (struct note_simd_array_uses_struct *) wi->info;

  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (TREE_CODE (*tp) == VAR_DECL
           && lookup_attribute ("omp simd array", DECL_ATTRIBUTES (*tp))
           && DECL_CONTEXT (*tp) == current_function_decl)
    {
      simd_array_to_simduid data;
      if (!*ns->htab)
        *ns->htab = new hash_table<simd_array_to_simduid> (15);
      data.decl = *tp;
      data.simduid = ns->simduid;
      simd_array_to_simduid **slot = (*ns->htab)->find_slot (&data, INSERT);
      if (*slot == NULL)
        {
          simd_array_to_simduid *p = XNEW (simd_array_to_simduid);
          *p = data;
          *slot = p;
        }
      else if ((*slot)->simduid != ns->simduid)
        (*slot)->simduid = -1U;
      *walk_subtrees = 0;
    }
  return NULL_TREE;
}
/* Find "omp simd array" temporaries and map them to the corresponding
   simduid.  */

static void
note_simd_array_uses (hash_table<simd_array_to_simduid> **htab)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  struct walk_stmt_info wi;
  struct note_simd_array_uses_struct ns;

  memset (&wi, 0, sizeof (wi));
  wi.info = &ns;
  ns.htab = htab;

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple stmt = gsi_stmt (gsi);
        if (!is_gimple_call (stmt) || !gimple_call_internal_p (stmt))
          continue;
        switch (gimple_call_internal_fn (stmt))
          {
          case IFN_GOMP_SIMD_LANE:
          case IFN_GOMP_SIMD_VF:
          case IFN_GOMP_SIMD_LAST_LANE:
            break;
          default:
            continue;
          }
        tree lhs = gimple_call_lhs (stmt);
        if (lhs == NULL_TREE)
          continue;
        imm_use_iterator use_iter;
        gimple use_stmt;
        ns.simduid = DECL_UID (SSA_NAME_VAR (gimple_call_arg (stmt, 0)));
        FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, lhs)
          if (!is_gimple_debug (use_stmt))
            walk_gimple_op (use_stmt, note_simd_array_uses_cb, &wi);
      }
}
/* Shrink arrays with "omp simd array" attribute to the corresponding
   vectorization factor.  */

static void
shrink_simd_arrays
  (hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab,
   hash_table<simduid_to_vf> *simduid_to_vf_htab)
{
  for (hash_table<simd_array_to_simduid>::iterator iter
         = simd_array_to_simduid_htab->begin ();
       iter != simd_array_to_simduid_htab->end (); ++iter)
    if ((*iter)->simduid != -1U)
      {
        tree decl = (*iter)->decl;
        int vf = 1;
        if (simduid_to_vf_htab)
          {
            simduid_to_vf *p = NULL, data;
            data.simduid = (*iter)->simduid;
            p = simduid_to_vf_htab->find (&data);
            if (p)
              vf = p->vf;
          }
        tree atype
          = build_array_type_nelts (TREE_TYPE (TREE_TYPE (decl)), vf);
        TREE_TYPE (decl) = atype;
        relayout_decl (decl);
      }

  delete simd_array_to_simduid_htab;
}
/* A helper function to free data refs.  */

void
vect_destroy_datarefs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
  vec<data_reference_p> datarefs;
  struct data_reference *dr;
  unsigned int i;

  if (loop_vinfo)
    datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
  else
    datarefs = BB_VINFO_DATAREFS (bb_vinfo);

  FOR_EACH_VEC_ELT (datarefs, i, dr)
    if (dr->aux)
      {
        free (dr->aux);
        dr->aux = NULL;
      }

  free_data_refs (datarefs);
}
/* If LOOP has been versioned during ifcvt, return the internal call
   guarding it.  */

static gimple
vect_loop_vectorized_call (struct loop *loop)
{
  basic_block bb = loop_preheader_edge (loop)->src;
  gimple g;
  do
    {
      g = last_stmt (bb);
      if (g)
        break;
      if (!single_pred_p (bb))
        break;
      bb = single_pred (bb);
    }
  while (1);
  if (g && gimple_code (g) == GIMPLE_COND)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (g);
      gsi_prev (&gsi);
      if (!gsi_end_p (gsi))
        {
          g = gsi_stmt (gsi);
          if (is_gimple_call (g)
              && gimple_call_internal_p (g)
              && gimple_call_internal_fn (g) == IFN_LOOP_VECTORIZED
              && (tree_to_shwi (gimple_call_arg (g, 0)) == loop->num
                  || tree_to_shwi (gimple_call_arg (g, 1)) == loop->num))
            return g;
        }
    }
  return NULL;
}
/* Fold LOOP_VECTORIZED internal call G to VALUE and
   update any immediate uses of its LHS.  */

static void
fold_loop_vectorized_call (gimple g, tree value)
{
  tree lhs = gimple_call_lhs (g);
  use_operand_p use_p;
  imm_use_iterator iter;
  gimple use_stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (g);

  update_call_from_tree (&gsi, value);
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
        SET_USE (use_p, value);
      update_stmt (use_stmt);
    }
}
/* Set the uids of all the statements in basic blocks inside loop
   represented by LOOP_VINFO.  LOOP_VECTORIZED_CALL is the internal
   call guarding the loop which has been if-converted.  */

static void
set_uid_loop_bbs (loop_vec_info loop_vinfo, gimple loop_vectorized_call)
{
  tree arg = gimple_call_arg (loop_vectorized_call, 1);
  basic_block *bbs;
  unsigned int i;
  struct loop *scalar_loop = get_loop (cfun, tree_to_shwi (arg));

  LOOP_VINFO_SCALAR_LOOP (loop_vinfo) = scalar_loop;
  gcc_checking_assert (vect_loop_vectorized_call
                         (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
                       == loop_vectorized_call);
  bbs = get_loop_body (scalar_loop);
  for (i = 0; i < scalar_loop->num_nodes; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;
      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          gimple_set_uid (phi, 0);
        }
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          gimple_set_uid (stmt, 0);
        }
    }
  free (bbs);
}
/* Function vectorize_loops.

   Entry point to loop vectorization phase.  */

unsigned
vectorize_loops (void)
{
  unsigned int i;
  unsigned int num_vectorized_loops = 0;
  unsigned int vect_loops_num;
  struct loop *loop;
  hash_table<simduid_to_vf> *simduid_to_vf_htab = NULL;
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;
  bool any_ifcvt_loops = false;
  unsigned ret = 0;

  vect_loops_num = number_of_loops (cfun);

  /* Bail out if there are no loops.  */
  if (vect_loops_num <= 1)
    return 0;

  if (cfun->has_simduid_loops)
    note_simd_array_uses (&simd_array_to_simduid_htab);

  init_stmt_vec_info_vec ();

  /*  ----------- Analyze loops. -----------  */

  /* If some loop was duplicated, it gets bigger number
     than all previously defined loops.  This fact allows us to run
     only over initial loops skipping newly generated ones.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->dont_vectorize)
      any_ifcvt_loops = true;
    else if ((flag_tree_loop_vectorize
              && optimize_loop_nest_for_speed_p (loop))
             || loop->force_vectorize)
      {
        loop_vec_info loop_vinfo;
        vect_location = find_loop_location (loop);
        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
          dump_printf (MSG_NOTE, "\nAnalyzing loop at %s:%d\n",
                       LOCATION_FILE (vect_location),
                       LOCATION_LINE (vect_location));

        loop_vinfo = vect_analyze_loop (loop);
        loop->aux = loop_vinfo;

        if (!loop_vinfo || !LOOP_VINFO_VECTORIZABLE_P (loop_vinfo))
          continue;

        if (!dbg_cnt (vect_loop))
          break;

        gimple loop_vectorized_call = vect_loop_vectorized_call (loop);
        if (loop_vectorized_call)
          set_uid_loop_bbs (loop_vinfo, loop_vectorized_call);
        if (LOCATION_LOCUS (vect_location) != UNKNOWN_LOCATION
            && dump_enabled_p ())
          dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                           "loop vectorized\n");
        vect_transform_loop (loop_vinfo);
        num_vectorized_loops++;
        /* Now that the loop has been vectorized, allow it to be unrolled
           etc.  */
        loop->force_vectorize = false;

        if (loop->simduid)
          {
            simduid_to_vf *simduid_to_vf_data = XNEW (simduid_to_vf);
            if (!simduid_to_vf_htab)
              simduid_to_vf_htab = new hash_table<simduid_to_vf> (15);
            simduid_to_vf_data->simduid = DECL_UID (loop->simduid);
            simduid_to_vf_data->vf = loop_vinfo->vectorization_factor;
            *simduid_to_vf_htab->find_slot (simduid_to_vf_data, INSERT)
              = simduid_to_vf_data;
          }

        if (loop_vectorized_call)
          {
            fold_loop_vectorized_call (loop_vectorized_call,
                                       boolean_true_node);
            ret |= TODO_cleanup_cfg;
          }
      }

  vect_location = UNKNOWN_LOCATION;

  statistics_counter_event (cfun, "Vectorized loops", num_vectorized_loops);
  if (dump_enabled_p ()
      || (num_vectorized_loops > 0 && dump_enabled_p ()))
    dump_printf_loc (MSG_NOTE, vect_location,
                     "vectorized %u loops in function.\n",
                     num_vectorized_loops);

  /*  ----------- Finalize. -----------  */

  if (any_ifcvt_loops)
    for (i = 1; i < vect_loops_num; i++)
      {
        loop = get_loop (cfun, i);
        if (loop && loop->dont_vectorize)
          {
            gimple g = vect_loop_vectorized_call (loop);
            if (g)
              {
                fold_loop_vectorized_call (g, boolean_false_node);
                ret |= TODO_cleanup_cfg;
              }
          }
      }

  for (i = 1; i < vect_loops_num; i++)
    {
      loop_vec_info loop_vinfo;

      loop = get_loop (cfun, i);
      if (!loop)
        continue;
      loop_vinfo = (loop_vec_info) loop->aux;
      destroy_loop_vec_info (loop_vinfo, true);
      loop->aux = NULL;
    }

  free_stmt_vec_info_vec ();

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins.  */
  if (cfun->has_simduid_loops)
    adjust_simduid_builtins (simduid_to_vf_htab);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, simduid_to_vf_htab);
  delete simduid_to_vf_htab;
  cfun->has_simduid_loops = false;

  if (num_vectorized_loops > 0)
    {
      /* If we vectorized any loop only virtual SSA form needs to be updated.
         ??? Also while we try hard to update loop-closed SSA form we fail
         to properly do this in some corner-cases (see PR56286).  */
      rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa_only_virtuals);
      return TODO_cleanup_cfg;
    }

  return ret;
}
/* Entry point to the simduid cleanup pass.  */

namespace {

const pass_data pass_data_simduid_cleanup =
{
  GIMPLE_PASS, /* type */
  "simduid", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_simduid_cleanup : public gimple_opt_pass
{
public:
  pass_simduid_cleanup (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_simduid_cleanup, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_simduid_cleanup (m_ctxt); }
  virtual bool gate (function *fun) { return fun->has_simduid_loops; }
  virtual unsigned int execute (function *);

}; // class pass_simduid_cleanup

unsigned int
pass_simduid_cleanup::execute (function *fun)
{
  hash_table<simd_array_to_simduid> *simd_array_to_simduid_htab = NULL;

  note_simd_array_uses (&simd_array_to_simduid_htab);

  /* Fold IFN_GOMP_SIMD_{VF,LANE,LAST_LANE} builtins.  */
  adjust_simduid_builtins (NULL);

  /* Shrink any "omp simd array" temporary arrays to the
     actual vectorization factors.  */
  if (simd_array_to_simduid_htab)
    shrink_simd_arrays (simd_array_to_simduid_htab, NULL);
  fun->has_simduid_loops = false;
  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_simduid_cleanup (gcc::context *ctxt)
{
  return new pass_simduid_cleanup (ctxt);
}
/* Entry point to basic block SLP phase.  */

namespace {

const pass_data pass_data_slp_vectorize =
{
  GIMPLE_PASS, /* type */
  "slp", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_TREE_SLP_VECTORIZATION, /* tv_id */
  ( PROP_ssa | PROP_cfg ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_slp_vectorize : public gimple_opt_pass
{
public:
  pass_slp_vectorize (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_slp_vectorize, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () { return new pass_slp_vectorize (m_ctxt); }
  virtual bool gate (function *) { return flag_tree_slp_vectorize != 0; }
  virtual unsigned int execute (function *);

}; // class pass_slp_vectorize

unsigned int
pass_slp_vectorize::execute (function *fun)
{
  basic_block bb;

  bool in_loop_pipeline = scev_initialized_p ();
  if (!in_loop_pipeline)
    {
      loop_optimizer_init (LOOPS_NORMAL);
      scev_initialize ();
    }

  init_stmt_vec_info_vec ();

  FOR_EACH_BB_FN (bb, fun)
    {
      vect_location = find_bb_location (bb);

      if (vect_slp_analyze_bb (bb))
        {
          if (!dbg_cnt (vect_slp))
            break;

          vect_slp_transform_bb (bb);
          if (dump_enabled_p ())
            dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                             "basic block vectorized\n");
        }
    }

  free_stmt_vec_info_vec ();

  if (!in_loop_pipeline)
    {
      scev_finalize ();
      loop_optimizer_finalize ();
    }

  return 0;
}

}  // anon namespace

gimple_opt_pass *
make_pass_slp_vectorize (gcc::context *ctxt)
{
  return new pass_slp_vectorize (ctxt);
}
/* Increase alignment of global arrays to improve vectorization potential.
   TODO:
   - Consider also structs that have an array field.
   - Use ipa analysis to prune arrays that can't be vectorized?
     This should involve global alignment analysis and in the future also
     array padding.  */

static unsigned int
increase_alignment (void)
{
  varpool_node *vnode;

  vect_location = UNKNOWN_LOCATION;

  /* Increase the alignment of all global arrays for vectorization.  */
  FOR_EACH_DEFINED_VARIABLE (vnode)
    {
      tree vectype, decl = vnode->decl;
      tree t;
      unsigned int alignment;

      t = TREE_TYPE (decl);
      if (TREE_CODE (t) != ARRAY_TYPE)
        continue;
      vectype = get_vectype_for_scalar_type (strip_array_types (t));
      if (!vectype)
        continue;
      alignment = TYPE_ALIGN (vectype);
      if (DECL_ALIGN (decl) >= alignment)
        continue;

      if (vect_can_force_dr_alignment_p (decl, alignment))
        {
          vnode->increase_alignment (TYPE_ALIGN (vectype));
          dump_printf (MSG_NOTE, "Increasing alignment of decl: ");
          dump_generic_expr (MSG_NOTE, TDF_SLIM, decl);
          dump_printf (MSG_NOTE, "\n");
        }
    }
  return 0;
}
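/* For illustration only (hypothetical target and numbers): for a global

       int a[256];

   with the default 4-byte alignment, and a target whose preferred
   vector type for int is 16 bytes wide, the loop above raises
   DECL_ALIGN (a) to 128 bits, provided vect_can_force_dr_alignment_p
   agrees that the alignment can be forced.  */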
namespace {

const pass_data pass_data_ipa_increase_alignment =
{
  SIMPLE_IPA_PASS, /* type */
  "increase_alignment", /* name */
  OPTGROUP_LOOP | OPTGROUP_VEC, /* optinfo_flags */
  TV_IPA_OPT, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_ipa_increase_alignment : public simple_ipa_opt_pass
{
public:
  pass_ipa_increase_alignment (gcc::context *ctxt)
    : simple_ipa_opt_pass (pass_data_ipa_increase_alignment, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_section_anchors && flag_tree_loop_vectorize;
    }

  virtual unsigned int execute (function *) { return increase_alignment (); }

}; // class pass_ipa_increase_alignment

}  // anon namespace

simple_ipa_opt_pass *
make_pass_ipa_increase_alignment (gcc::context *ctxt)
{
  return new pass_ipa_increase_alignment (ctxt);
}