1 From 2170230e196ecd0052c840136a4902295349b3e7 Mon Sep 17 00:00:00 2001
2 From: Richard Lowe <richlowe@richlowe.net>
3 Date: Sat, 27 Oct 2012 02:44:09 +0100
4 Subject: [PATCH 05/11] Implement -fstrict-calling-conventions
6 Stock GCC is overly willing to violate the ABI when calling local functions,
7 such that it passes arguments in registers on i386. This hampers debugging
8 with anything other than a fully-aware DWARF debugger, and is generally not
11 Implement a flag which disables this behaviour, enabled by default. The flag is
12 global, though only effective on i386, to more easily allow its globalization
13 later which, given the odds, is likely to be necessary.
16 gcc/config/i386/i386.c | 122 +++++++++++++++---------------
17 gcc/doc/invoke.texi | 6 ++
18 gcc/testsuite/gcc.target/i386/local.c | 3 +-
19 gcc/testsuite/gcc.target/i386/strict-cc.c | 24 ++++++
20 5 files changed, 98 insertions(+), 61 deletions(-)
21 create mode 100644 gcc/testsuite/gcc.target/i386/strict-cc.c
23 diff --git a/gcc/common.opt b/gcc/common.opt
24 index 437db8e8615..978fbc83b89 100644
27 @@ -2341,6 +2341,10 @@ fstrict-aliasing
28 Common Report Var(flag_strict_aliasing) Optimization
29 Assume strict aliasing rules apply.
31 +fstrict-calling-conventions
32 +Common Report Var(flag_strict_calling_conventions) Init(1)
33 +Use strict ABI calling conventions even for static functions.
36 Common Report Var(flag_strict_overflow) Optimization
37 Treat signed overflow as undefined.
38 diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
39 index ba2abc53ed1..3651b995ea0 100644
40 --- a/gcc/config/i386/i386.c
41 +++ b/gcc/config/i386/i386.c
42 @@ -2599,7 +2599,7 @@ struct ptt
46 -/* This table must be in sync with enum processor_type in i386.h. */
47 +/* This table must be in sync with enum processor_type in i386.h. */
48 static const struct ptt processor_target_table[PROCESSOR_max] =
50 {"generic", &generic_cost, 16, 10, 16, 10, 16},
51 @@ -5107,14 +5107,14 @@ ix86_option_override_internal (bool main_args_p,
52 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
53 | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_FMA4
54 | PTA_XOP | PTA_LWP | PTA_BMI | PTA_TBM | PTA_F16C
55 - | PTA_FMA | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE
56 + | PTA_FMA | PTA_PRFCHW | PTA_FXSR | PTA_XSAVE
57 | PTA_XSAVEOPT | PTA_FSGSBASE},
58 {"bdver4", PROCESSOR_BDVER4, CPU_BDVER4,
59 PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
60 | PTA_SSE4A | PTA_CX16 | PTA_ABM | PTA_SSSE3 | PTA_SSE4_1
61 - | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_AVX2
62 - | PTA_FMA4 | PTA_XOP | PTA_LWP | PTA_BMI | PTA_BMI2
63 - | PTA_TBM | PTA_F16C | PTA_FMA | PTA_PRFCHW | PTA_FXSR
64 + | PTA_SSE4_2 | PTA_AES | PTA_PCLMUL | PTA_AVX | PTA_AVX2
65 + | PTA_FMA4 | PTA_XOP | PTA_LWP | PTA_BMI | PTA_BMI2
66 + | PTA_TBM | PTA_F16C | PTA_FMA | PTA_PRFCHW | PTA_FXSR
67 | PTA_XSAVE | PTA_XSAVEOPT | PTA_FSGSBASE | PTA_RDRND
68 | PTA_MOVBE | PTA_MWAITX},
69 {"znver1", PROCESSOR_ZNVER1, CPU_ZNVER1,
70 @@ -5982,7 +5982,7 @@ ix86_option_override_internal (bool main_args_p,
71 /* For all chips supporting SSE2, -mfpmath=sse performs better than
72 fpmath=387. The second is however default at many targets since the
73 extra 80bit precision of temporaries is considered to be part of ABI.
74 - Overwrite the default at least for -ffast-math.
75 + Overwrite the default at least for -ffast-math.
76 TODO: -mfpmath=both seems to produce same performing code with bit
77 smaller binaries. It is however not clear if register allocation is
78 ready for this setting.
79 @@ -6331,7 +6331,7 @@ ix86_conditional_register_usage (void)
81 /* See the definition of CALL_USED_REGISTERS in i386.h. */
82 c_mask = CALL_USED_REGISTERS_MASK (TARGET_64BIT_MS_ABI);
85 CLEAR_HARD_REG_SET (reg_class_contents[(int)CLOBBERED_REGS]);
87 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
88 @@ -6998,9 +6998,9 @@ ix86_valid_target_attribute_p (tree fndecl,
90 tree old_optimize = build_optimization_node (&global_options);
92 - /* Get the optimization options of the current function. */
93 + /* Get the optimization options of the current function. */
94 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
98 func_optimize = old_optimize;
100 @@ -7008,7 +7008,7 @@ ix86_valid_target_attribute_p (tree fndecl,
101 memset (&func_options, 0, sizeof (func_options));
102 init_options_struct (&func_options, NULL);
103 lang_hooks.init_options_struct (&func_options);
106 cl_optimization_restore (&func_options,
107 TREE_OPTIMIZATION (func_optimize));
109 @@ -8015,6 +8015,7 @@ ix86_function_regparm (const_tree type, const_tree decl)
110 and callee not, or vice versa. Instead look at whether the callee
111 is optimized or not. */
112 if (target && opt_for_fn (target->decl, optimize)
113 + && !flag_strict_calling_conventions
114 && !(profile_flag && !flag_fentry))
116 cgraph_local_info *i = &target->local;
117 @@ -8112,6 +8113,7 @@ ix86_function_sseregparm (const_tree type, const_tree decl, bool warn)
118 /* TARGET_SSE_MATH */
119 && (target_opts_for_fn (target->decl)->x_ix86_fpmath & FPMATH_SSE)
120 && opt_for_fn (target->decl, optimize)
121 + && !flag_strict_calling_conventions
122 && !(profile_flag && !flag_fentry))
124 cgraph_local_info *i = &target->local;
125 @@ -8710,7 +8712,7 @@ init_cumulative_args (CUMULATIVE_ARGS *cum, /* Argument info to initialize */
127 The midde-end can't deal with the vector types > 16 bytes. In this
128 case, we return the original mode and warn ABI change if CUM isn't
132 If INT_RETURN is true, warn ABI change if the vector mode isn't
133 available for function return value. */
134 @@ -10808,7 +10810,7 @@ ix86_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
136 /* Unless ABI prescibes otherwise,
137 MMX/3dNow values are returned in MM0 if available. */
141 return TARGET_VECT8_RETURNS || !TARGET_MMX;
143 @@ -10916,7 +10918,7 @@ ix86_build_builtin_va_list (void)
145 /* For SYSV_ABI we use an array of one record. */
146 sysv_va_list_type_node = ix86_build_builtin_va_list_64 ();
149 /* For MS_ABI we use plain pointer to argument area. */
150 tree char_ptr_type = build_pointer_type (char_type_node);
151 tree attr = tree_cons (get_identifier ("ms_abi va_list"), NULL_TREE,
152 @@ -11861,7 +11863,7 @@ standard_sse_constant_opcode (rtx_insn *insn, rtx x)
153 else if (x == constm1_rtx || vector_all_ones_operand (x, mode))
155 enum attr_mode insn_mode = get_attr_mode (insn);
161 @@ -11996,7 +11998,7 @@ ix86_frame_pointer_required (void)
162 /* SSE saves require frame-pointer when stack is misaligned. */
163 if (TARGET_64BIT_MS_ABI && ix86_incoming_stack_boundary < 128)
167 /* In ix86_option_override_internal, TARGET_OMIT_LEAF_FRAME_POINTER
168 turns off the frame pointer by default. Turn it back on now if
169 we've not got a leaf function. */
170 @@ -15549,7 +15551,7 @@ ix86_decompose_address (rtx addr, struct ix86_address *out)
171 addr = XEXP (addr, 0);
172 if (CONST_INT_P (addr))
176 else if (GET_CODE (addr) == AND
177 && const_32bit_mask (XEXP (addr, 1), DImode))
179 @@ -17062,7 +17064,7 @@ get_dllimport_decl (tree decl, bool beimport)
180 #ifdef SUB_TARGET_RECORD_STUB
181 SUB_TARGET_RECORD_STUB (name);
186 rtl = gen_const_mem (Pmode, rtl);
187 set_mem_alias_set (rtl, ix86_GOT_alias_set ());
188 @@ -17109,7 +17111,7 @@ legitimize_dllimport_symbol (rtx symbol, bool want_reg)
192 -/* Expand SYMBOL into its corresponding dllimport or refptr symbol. WANT_REG
193 +/* Expand SYMBOL into its corresponding dllimport or refptr symbol. WANT_REG
194 is true if we require the result be a register. */
197 @@ -18245,7 +18247,7 @@ ix86_print_operand (FILE *file, rtx x, int code)
206 @@ -19637,7 +19639,7 @@ ix86_mode_needed (int entity, rtx_insn *insn)
209 /* Check if a 256bit or 512bit AVX register is referenced in stores. */
213 ix86_check_avx_upper_stores (rtx dest, const_rtx, void *data)
215 @@ -19646,7 +19648,7 @@ ix86_check_avx_upper_stores (rtx dest, const_rtx, void *data)
216 bool *used = (bool *) data;
222 /* Calculate mode of upper 128bit AVX registers after the insn. */
224 @@ -20647,7 +20649,7 @@ ix86_expand_vector_move_misalign (machine_mode mode, rtx operands[])
225 t = gen_reg_rtx (V4SFmode);
230 if (TARGET_SSE_PARTIAL_REG_DEPENDENCY)
231 emit_move_insn (t, CONST0_RTX (V4SFmode));
233 @@ -21657,7 +21659,7 @@ ix86_emit_binop (enum rtx_code code, machine_mode mode,
235 op = gen_rtx_SET (dst, gen_rtx_fmt_ee (code, mode, dst, src));
236 clob = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG));
239 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, op, clob)));
242 @@ -25325,7 +25327,7 @@ ix86_expand_vec_perm (rtx operands[])
246 - /* The XOP VPPERM insn supports three inputs. By ignoring the
247 + /* The XOP VPPERM insn supports three inputs. By ignoring the
248 one_operand_shuffle special case, we avoid creating another
249 set of constant vectors in memory. */
250 one_operand_shuffle = false;
251 @@ -27224,7 +27226,7 @@ expand_small_movmem_or_setmem (rtx destmem, rtx srcmem,
252 DONE_LABEL is a label after the whole copying sequence. The label is created
253 on demand if *DONE_LABEL is NULL.
254 MIN_SIZE is minimal size of block copied. This value gets adjusted for new
255 - bounds after the initial copies.
256 + bounds after the initial copies.
258 DESTMEM/SRCMEM are memory expressions pointing to the copies block,
259 DESTPTR/SRCPTR are pointers to the block. DYNAMIC_CHECK indicate whether
260 @@ -27533,7 +27535,7 @@ expand_set_or_movmem_constant_prologue (rtx dst, rtx *srcp, rtx destreg,
264 -/* Return true if ALG can be used in current context.
265 +/* Return true if ALG can be used in current context.
266 Assume we expand memset if MEMSET is true. */
268 alg_usable_p (enum stringop_alg alg, bool memset, bool have_as)
269 @@ -27866,7 +27868,7 @@ promote_duplicated_reg_to_size (rtx val, int size_needed, int desired_align,
270 with specified algorithm.
272 4) Epilogue: code copying tail of the block that is too small to be
273 - handled by main body (or up to size guarded by prologue guard).
274 + handled by main body (or up to size guarded by prologue guard).
276 Misaligned move sequence
278 @@ -28073,7 +28075,7 @@ ix86_expand_set_or_movmem (rtx dst, rtx src, rtx count_exp, rtx val_exp,
280 /* Do the cheap promotion to allow better CSE across the
281 main loop and epilogue (ie one load of the big constant in the
284 For now the misaligned move sequences do not have fast path
285 without broadcasting. */
286 if (issetmem && ((CONST_INT_P (val_exp) || misaligned_prologue_used)))
287 @@ -30685,7 +30687,7 @@ ix86_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
288 using topological ordering in the region. */
289 if (rgn == CONTAINING_RGN (e->src->index)
290 && BLOCK_TO_BB (bb->index) > BLOCK_TO_BB (e->src->index))
291 - add_dependee_for_func_arg (first_arg, e->src);
292 + add_dependee_for_func_arg (first_arg, e->src);
296 @@ -31192,7 +31194,7 @@ ix86_local_alignment (tree exp, machine_mode mode,
297 other unit can not rely on the alignment.
299 Exclude va_list type. It is the common case of local array where
300 - we can not benefit from the alignment.
301 + we can not benefit from the alignment.
303 TODO: Probably one should optimize for size only when var is not escaping. */
304 if (TARGET_64BIT && optimize_function_for_speed_p (cfun)
305 @@ -33010,7 +33012,7 @@ add_condition_to_bb (tree function_decl, tree version_decl,
306 convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
307 build_fold_addr_expr (version_decl));
308 result_var = create_tmp_var (ptr_type_node);
309 - convert_stmt = gimple_build_assign (result_var, convert_expr);
310 + convert_stmt = gimple_build_assign (result_var, convert_expr);
311 return_stmt = gimple_build_return (result_var);
313 if (predicate_chain == NULL_TREE)
314 @@ -33037,7 +33039,7 @@ add_condition_to_bb (tree function_decl, tree version_decl,
315 gimple_seq_add_stmt (&gseq, call_cond_stmt);
317 predicate_chain = TREE_CHAIN (predicate_chain);
320 if (and_expr_var == NULL)
321 and_expr_var = cond_var;
323 @@ -33078,7 +33080,7 @@ add_condition_to_bb (tree function_decl, tree version_decl,
324 gimple_set_bb (return_stmt, bb2);
327 - make_edge (bb1, bb3, EDGE_FALSE_VALUE);
328 + make_edge (bb1, bb3, EDGE_FALSE_VALUE);
331 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
332 @@ -33134,7 +33136,7 @@ get_builtin_code_for_version (tree decl, tree *predicate_list)
341 @@ -33207,11 +33209,11 @@ get_builtin_code_for_version (tree decl, tree *predicate_list)
342 cl_target_option_save (&cur_target, &global_options);
343 target_node = ix86_valid_target_attribute_tree (attrs, &global_options,
344 &global_options_set);
347 gcc_assert (target_node);
348 new_target = TREE_TARGET_OPTION (target_node);
349 gcc_assert (new_target);
352 if (new_target->arch_specified && new_target->arch > 0)
354 switch (new_target->arch)
355 @@ -33296,14 +33298,14 @@ get_builtin_code_for_version (tree decl, tree *predicate_list)
358 cl_target_option_restore (&global_options, &cur_target);
361 if (predicate_list && arg_str == NULL)
363 error_at (DECL_SOURCE_LOCATION (decl),
364 "No dispatcher found for the versioning attributes");
371 predicate_decl = ix86_builtins [(int) IX86_BUILTIN_CPU_IS];
372 @@ -33370,7 +33372,7 @@ get_builtin_code_for_version (tree decl, tree *predicate_list)
373 *predicate_list = predicate_chain;
380 /* This compares the priority of target features in function DECL1
381 @@ -33389,7 +33391,7 @@ ix86_compare_version_priority (tree decl1, tree decl2)
383 /* V1 and V2 point to function versions with different priorities
384 based on the target ISA. This function compares their priorities. */
388 feature_compare (const void *v1, const void *v2)
390 @@ -33713,12 +33715,12 @@ ix86_function_versions (tree fn1, tree fn2)
393 XDELETEVEC (target1);
394 - XDELETEVEC (target2);
396 + XDELETEVEC (target2);
403 ix86_mangle_decl_assembler_name (tree decl, tree id)
405 /* For function version, add the target suffix to the assembler name. */
406 @@ -33788,7 +33790,7 @@ make_dispatcher_decl (const tree decl)
407 fn_type = TREE_TYPE (decl);
408 func_type = build_function_type (TREE_TYPE (fn_type),
409 TYPE_ARG_TYPES (fn_type));
412 func_decl = build_fn_decl (func_name, func_type);
413 XDELETEVEC (func_name);
414 TREE_USED (func_decl) = 1;
415 @@ -33801,7 +33803,7 @@ make_dispatcher_decl (const tree decl)
416 /* This will be of type IFUNCs have to be externally visible. */
417 TREE_PUBLIC (func_decl) = 1;
424 @@ -33838,7 +33840,7 @@ ix86_get_function_versions_dispatcher (void *decl)
425 tree dispatch_decl = NULL;
427 struct cgraph_function_version_info *default_version_info = NULL;
430 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
432 node = cgraph_node::get (fn);
433 @@ -33846,7 +33848,7 @@ ix86_get_function_versions_dispatcher (void *decl)
435 node_v = node->function_version ();
436 gcc_assert (node_v != NULL);
439 if (node_v->dispatcher_resolver != NULL)
440 return node_v->dispatcher_resolver;
442 @@ -33993,7 +33995,7 @@ make_resolver_func (const tree default_decl,
444 gcc_assert (dispatch_decl != NULL);
445 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
446 - DECL_ATTRIBUTES (dispatch_decl)
447 + DECL_ATTRIBUTES (dispatch_decl)
448 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
450 /* Create the alias for dispatch to resolver here. */
451 @@ -34008,7 +34010,7 @@ make_resolver_func (const tree default_decl,
452 provide the code to dispatch the right function at run-time. NODE points
453 to the dispatcher decl whose body will be created. */
457 ix86_generate_version_dispatcher_body (void *node_p)
460 @@ -34188,7 +34190,7 @@ fold_builtin_cpu (tree fndecl, tree *args)
468 M_INTEL_COREI7_NEHALEM,
469 @@ -34237,7 +34239,7 @@ fold_builtin_cpu (tree fndecl, tree *args)
470 {"barcelona", M_AMDFAM10H_BARCELONA},
471 {"shanghai", M_AMDFAM10H_SHANGHAI},
472 {"istanbul", M_AMDFAM10H_ISTANBUL},
473 - {"btver1", M_AMD_BTVER1},
474 + {"btver1", M_AMD_BTVER1},
475 {"amdfam15h", M_AMDFAM15H},
476 {"bdver1", M_AMDFAM15H_BDVER1},
477 {"bdver2", M_AMDFAM15H_BDVER2},
478 @@ -37540,9 +37542,9 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
480 /* Make it call __cpu_indicator_init in libgcc. */
481 tree call_expr, fndecl, type;
482 - type = build_function_type_list (integer_type_node, NULL_TREE);
483 + type = build_function_type_list (integer_type_node, NULL_TREE);
484 fndecl = build_fn_decl ("__cpu_indicator_init", type);
485 - call_expr = build_call_expr (fndecl, 0);
486 + call_expr = build_call_expr (fndecl, 0);
487 return expand_expr (call_expr, target, mode, EXPAND_NORMAL);
489 case IX86_BUILTIN_CPU_IS:
490 @@ -38016,7 +38018,7 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
492 op2 = copy_to_mode_reg (SImode, op2);
494 - emit_insn (fcode == IX86_BUILTIN_MONITOR
495 + emit_insn (fcode == IX86_BUILTIN_MONITOR
496 ? ix86_gen_monitor (op0, op1, op2)
497 : ix86_gen_monitorx (op0, op1, op2));
499 @@ -43100,7 +43102,7 @@ ix86_mitigate_rop (void)
500 regrename_analyze (NULL);
502 auto_vec<du_head_p> cands;
505 for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
507 if (!NONDEBUG_INSN_P (insn))
508 @@ -43176,7 +43178,7 @@ ix86_mitigate_rop (void)
509 reg_names[best_reg], reg_class_names[superclass]);
517 @@ -43274,7 +43276,7 @@ ix86_reorg (void)
519 if (flag_mitigate_rop)
520 ix86_mitigate_rop ();
523 if (TARGET_SEH && current_function_has_exception_handlers ())
524 ix86_seh_fixup_eh_fallthru ();
526 @@ -50097,7 +50099,7 @@ ix86_expand_sse2_mulvxdi3 (rtx op0, rtx op1, rtx op2)
528 /* Multiply lower parts and add all */
529 t5 = gen_reg_rtx (V2DImode);
530 - emit_insn (gen_vec_widen_umult_even_v4si (t5,
531 + emit_insn (gen_vec_widen_umult_even_v4si (t5,
532 gen_lowpart (V4SImode, op1),
533 gen_lowpart (V4SImode, op2)));
534 op0 = expand_binop (mode, add_optab, t5, t4, op0, 1, OPTAB_DIRECT);
535 @@ -50243,7 +50245,7 @@ ix86_expand_pextr (rtx *operands)
537 dst = SUBREG_REG (dst);
543 pos += SUBREG_BYTE (src) * BITS_PER_UNIT;
544 @@ -52079,7 +52081,7 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
545 gcc_assert (MEM_P (mem));
547 addr = XEXP (mem, 0);
550 if (GET_CODE (addr) == CONST)
551 addr = XEXP (addr, 0);
553 diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
554 index 6f3c344476c..68127bbf6ef 100644
555 --- a/gcc/doc/invoke.texi
556 +++ b/gcc/doc/invoke.texi
557 @@ -8639,6 +8639,12 @@ int f() @{
558 The @option{-fstrict-aliasing} option is enabled at levels
559 @option{-O2}, @option{-O3}, @option{-Os}.
561 +@item -fstrict-calling-conventions
562 +@opindex fstrict-calling-conventions
563 +Use strict ABI calling conventions even for local functions.
564 +This disables certain optimizations that may cause GCC to call local
565 +functions in a manner other than that described by the ABI.
567 @item -fstrict-overflow
568 @opindex fstrict-overflow
569 Allow the compiler to assume strict signed overflow rules, depending
570 diff --git a/gcc/testsuite/gcc.target/i386/local.c b/gcc/testsuite/gcc.target/i386/local.c
571 index f4444951e12..3a487583d81 100644
572 --- a/gcc/testsuite/gcc.target/i386/local.c
573 +++ b/gcc/testsuite/gcc.target/i386/local.c
575 /* { dg-do compile } */
576 -/* { dg-options "-O2 -funit-at-a-time" } */
577 +/* { dg-options "-O2 -funit-at-a-time -fno-strict-calling-conventions" { target ia32 } } */
578 +/* { dg-options "-O2 -funit-at-a-time" { target lp64 } } */
579 /* { dg-final { scan-assembler "magic\[^\\n\]*eax" { target ia32 } } } */
580 /* { dg-final { scan-assembler "magic\[^\\n\]*(edi|ecx)" { target { ! ia32 } } } } */
582 diff --git a/gcc/testsuite/gcc.target/i386/strict-cc.c b/gcc/testsuite/gcc.target/i386/strict-cc.c
584 index 00000000000..fa0543e52ff
586 +++ b/gcc/testsuite/gcc.target/i386/strict-cc.c
588 +/* { dg-do compile { target { ilp32 } } } */
589 +/* { dg-options "-O2 -funit-at-a-time -fstrict-calling-conventions" } */
590 +/* { dg-final { scan-assembler "pushl.*\\\$1" } } */
591 +/* { dg-final { scan-assembler "pushl.*\\\$2" } } */
592 +/* { dg-final { scan-assembler "pushl.*\\\$3" } } */
593 +/* { dg-final { scan-assembler "pushl.*\\\$4" } } */
594 +/* { dg-final { scan-assembler "pushl.*\\\$5" } } */
598 +/* Verify that local calling convention is not used if strict conventions. */
599 +static int t(int, int, int, int, int) __attribute__ ((noinline));
608 +t(int a, int b, int c, int d, int e)
610 + printf("%d\n", a, b, c, d, e);