1 /* Subroutines used for code generation for eBPF.
2 Copyright (C) 2019-2023 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.

11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
24 #include "coretypes.h"
28 #include "insn-config.h"
29 #include "insn-attr.h"
34 #include "stringpool.h"
37 #include "stor-layout.h"
46 #include "target-def.h"
47 #include "basic-block.h"
52 #include "c-family/c-common.h"
53 #include "diagnostic.h"
56 #include "langhooks.h"
59 #include "cfg.h" /* needed for struct control_flow_graph used in BB macros */
61 #include "gimple-iterator.h"
62 #include "gimple-walk.h"
63 #include "tree-pass.h"
64 #include "tree-iterator.h"
67 #include "pass_manager.h"
70 #include "gimplify-me.h"
72 #include "core-builtins.h"
/* Per-function machine data.  Allocated per-function by
   bpf_init_machine_status and reached through cfun->machine.  */

struct GTY(()) machine_function
{
  /* Number of bytes saved on the stack for local variables.  This is
     read and written by bpf_compute_frame_layout and read by
     bpf_initial_elimination_offset.  NOTE(review): interior lines of
     this struct are missing from the extraction; confirm no other
     fields exist in the original.  */
  int local_vars_size;
};
/* Handle an attribute requiring a FUNCTION_DECL;
   arguments as in struct attribute_spec.handler.

   Used for both `kernel_helper' and `naked'.  Warns (and refuses to
   add the attribute) when applied to a non-function, and additionally
   validates that `kernel_helper' carries exactly one INTEGER_CST
   argument (the helper number emitted by bpf_output_call).  */

static tree
bpf_handle_fndecl_attribute (tree *node, tree name,
			     tree args,
			     int flags ATTRIBUTE_UNUSED,
			     bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
	       name);
      *no_add_attrs = true;
    }

  if (is_attribute_p ("kernel_helper", name))
    {
      if (args)
	{
	  /* The single argument must be an integer constant: it is
	     the kernel helper number.  */
	  tree cst = TREE_VALUE (args);
	  if (TREE_CODE (cst) != INTEGER_CST)
	    {
	      warning (OPT_Wattributes, "%qE attribute requires an integer argument",
		       name);
	      *no_add_attrs = true;
	    }
	}
      else
	{
	  warning (OPT_Wattributes, "%qE requires an argument", name);
	  *no_add_attrs = true;
	}
    }

  return NULL_TREE;
}
/* Handle preserve_access_index attribute, which can be applied to structs,
   unions and classes.  Actually adding the attribute to the TYPE_DECL is
   taken care of for us, so just warn for types that aren't supported.  */

static tree
bpf_handle_preserve_access_index_attribute (tree *node, tree name,
					    tree args ATTRIBUTE_UNUSED,
					    int flags ATTRIBUTE_UNUSED,
					    bool *no_add_attrs)
{
  /* RECORD_TYPE covers both `struct' and `class'; anything else is
     rejected with a warning and the attribute is dropped.  */
  if (TREE_CODE (*node) != RECORD_TYPE && TREE_CODE (*node) != UNION_TYPE)
    {
      warning (OPT_Wattributes,
	       "%qE attribute only applies to structure, union and class types",
	       name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Target-specific attributes.  Registered through
   TARGET_ATTRIBUTE_TABLE below.  */

static const struct attribute_spec bpf_attribute_table[] =
{
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
     function_type_required, affects_type_identity, handler,
     exclusions } */

  /* Attribute to mark function prototypes as kernel helpers.  Takes
     exactly one argument (the helper number).  */
  { "kernel_helper", 1, 1, true, false, false, false,
    bpf_handle_fndecl_attribute, NULL },

  /* CO-RE support: attribute to mark that all accesses to the declared
     struct/union/array should be recorded.  */
  { "preserve_access_index", 0, -1, false, true, false, true,
    bpf_handle_preserve_access_index_attribute, NULL },

  /* Support for `naked' function attribute.  */
  { "naked", 0, 1, false, false, false, false,
    bpf_handle_fndecl_attribute, NULL },

  /* The last attribute spec is set to be NULL.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
166 #undef TARGET_ATTRIBUTE_TABLE
167 #define TARGET_ATTRIBUTE_TABLE bpf_attribute_table
169 /* Data structures for the eBPF specific built-ins. */
171 /* Maximum number of arguments taken by a builtin function, plus
173 #define BPF_BUILTIN_MAX_ARGS 5
175 GTY (()) tree bpf_builtins
[(int) BPF_BUILTIN_MAX
];
177 void bpf_register_coreattr_pass (void);
/* Initialize the per-function machine status.  Installed as
   init_machine_status in bpf_option_override; called once per
   function by the middle-end.  */

static struct machine_function *
bpf_init_machine_status (void)
{
  /* Note this initializes all fields to 0, which is just OK for
     us.  */
  return ggc_cleared_alloc<machine_function> ();
}
/* Override options and do some other initialization.  */

static void
bpf_option_override (void)
{
  /* Set the initializer for the per-function status structure.  */
  init_machine_status = bpf_init_machine_status;

  /* BPF CO-RE support requires BTF debug info generation.  */
  if (TARGET_BPF_CORE && !btf_debuginfo_p ())
    error ("BPF CO-RE requires BTF debugging information, use %<-gbtf%>");

  /* To support the portability needs of BPF CO-RE approach, BTF debug
     information includes the BPF CO-RE relocations.  NOTE(review): the
     guarding condition on this statement is missing from the
     extraction; presumably it is conditional on CO-RE being enabled --
     confirm against the original file.  */
  if (TARGET_BPF_CORE)
    write_symbols |= BTF_WITH_CORE_DEBUG;

  /* Unlike much of the other BTF debug information, the information necessary
     for CO-RE relocations is added to the CTF container by the BPF backend.
     Enabling LTO adds some complications in the generation of the BPF CO-RE
     relocations because if LTO is in effect, the relocations need to be
     generated late in the LTO link phase.  This poses a new challenge for the
     compiler to now provide means to combine the early BTF and late BTF CO-RE
     debug info, similar to DWARF debug info.  BTF/CO-RE debug info is not
     amenable to such a split generation and a later merging.

     In any case, in absence of linker support for BTF sections at this time,
     it is acceptable to simply disallow LTO for BPF CO-RE compilations.  */
  if (flag_lto && TARGET_BPF_CORE)
    sorry ("BPF CO-RE does not support LTO");

  /* -gbtf implies -mcore when using the BPF backend, unless -mno-co-re
     is specified explicitly by the user.  */
  if (btf_debuginfo_p () && !(target_flags_explicit & MASK_BPF_CORE))
    {
      target_flags |= MASK_BPF_CORE;
      write_symbols |= BTF_WITH_CORE_DEBUG;
    }

  /* Determine available features from ISA setting (-mcpu=).  Each
     bpf_has_* flag defaults to -1 ("not set on the command line") and
     is derived from the selected ISA version here.  */
  if (bpf_has_jmpext == -1)
    bpf_has_jmpext = (bpf_isa >= ISA_V2);

  if (bpf_has_alu32 == -1)
    bpf_has_alu32 = (bpf_isa >= ISA_V3);

  if (bpf_has_jmp32 == -1)
    bpf_has_jmp32 = (bpf_isa >= ISA_V3);

  if (bpf_has_v3_atomics == -1)
    bpf_has_v3_atomics = (bpf_isa >= ISA_V3);

  if (bpf_has_bswap == -1)
    bpf_has_bswap = (bpf_isa >= ISA_V4);

  if (bpf_has_sdiv == -1)
    bpf_has_sdiv = (bpf_isa >= ISA_V4);

  if (bpf_has_smov == -1)
    bpf_has_smov = (bpf_isa >= ISA_V4);

  /* Disable -fstack-protector as it is not supported in BPF.  */
  if (flag_stack_protect)
    {
      if (!flag_stack_protector_set_by_fhardened_p)
	inform (input_location,
		"%<-fstack-protector%> does not work "
		"on this architecture");
      flag_stack_protect = 0;
    }

  /* The BPF target does not support tail call optimization.  */
  flag_optimize_sibling_calls = 0;
}
265 #undef TARGET_OPTION_OVERRIDE
266 #define TARGET_OPTION_OVERRIDE bpf_option_override
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
bpf_asm_init_sections (void)
{
  /* NOTE(review): the body of this function is missing from the
     extraction.  Presumably it initializes the BTF.ext output
     machinery when BTF with CO-RE debug info is in effect -- confirm
     against the original file.  */
  if (btf_debuginfo_p () && btf_with_core_debuginfo_p ())
    btf_ext_init ();
}
277 #undef TARGET_ASM_INIT_SECTIONS
278 #define TARGET_ASM_INIT_SECTIONS bpf_asm_init_sections
280 /* Implement TARGET_ASM_FILE_END. */
289 #undef TARGET_ASM_FILE_END
290 #define TARGET_ASM_FILE_END bpf_file_end
/* Define target-specific CPP macros.  This function in used in the
   definition of TARGET_CPU_CPP_BUILTINS in bpf.h */

#define builtin_define(TXT) cpp_define (pfile, TXT)

void
bpf_target_macros (cpp_reader *pfile)
{
  builtin_define ("__BPF__");
  builtin_define ("__bpf__");

  /* Advertise the configured endianness to the preprocessor.  */
  if (TARGET_BIG_ENDIAN)
    builtin_define ("__BPF_BIG_ENDIAN__");
  else
    builtin_define ("__BPF_LITTLE_ENDIAN__");
}
/* Return an RTX representing the place where a function returns or
   receives a value of data type RET_TYPE, a tree node representing a
   data type.

   Function return values always live in register r0 (BPF_R0);
   integral types are first promoted per the function's promotion
   rules.  */

static rtx
bpf_function_value (const_tree ret_type,
		    const_tree fntype_or_decl,
		    bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp;

  mode = TYPE_MODE (ret_type);
  if (INTEGRAL_TYPE_P (ret_type))
    mode = promote_function_mode (ret_type, mode, &unsignedp,
				  fntype_or_decl, 1);

  return gen_rtx_REG (mode, BPF_R0);
}
329 #undef TARGET_FUNCTION_VALUE
330 #define TARGET_FUNCTION_VALUE bpf_function_value
332 /* Return true if REGNO is the number of a hard register in which the
333 values of called function may come back. */
336 bpf_function_value_regno_p (const unsigned int regno
)
338 return (regno
== BPF_R0
);
341 #undef TARGET_FUNCTION_VALUE_REGNO_P
342 #define TARGET_FUNCTION_VALUE_REGNO_P bpf_function_value_regno_p
345 /* Determine whether to warn about lack of return statement in a
349 bpf_warn_func_return (tree decl
)
351 /* Naked functions are implemented entirely in assembly, including
352 the return instructions. */
353 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl
)) == NULL_TREE
;
356 #undef TARGET_WARN_FUNC_RETURN
357 #define TARGET_WARN_FUNC_RETURN bpf_warn_func_return
/* Compute the size of the function's stack frame, including the local
   area and the register-save area.  Stores the result in
   cfun->machine->local_vars_size and diagnoses frames that exceed the
   eBPF stack limit (-mframe-limit).  */

static void
bpf_compute_frame_layout (void)
{
  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
  int padding_locals;

  /* Set the space used in the stack by local variables.  This is
     rounded up to respect the minimum stack alignment.  */
  cfun->machine->local_vars_size = get_frame_size ();

  padding_locals = cfun->machine->local_vars_size % stack_alignment;
  if (padding_locals)
    padding_locals = stack_alignment - padding_locals;

  cfun->machine->local_vars_size += padding_locals;

  /* Check that the total size of the frame doesn't exceed the limit
     imposed by eBPF.  */
  if (cfun->machine->local_vars_size > bpf_frame_limit)
    {
      /* Emit the error only once per compilation, even if several
	 functions overflow the limit.  */
      static int stack_limit_exceeded = 0;

      if (!stack_limit_exceeded)
	error ("eBPF stack limit exceeded");
      stack_limit_exceeded = 1;
    }
}
390 #undef TARGET_COMPUTE_FRAME_LAYOUT
391 #define TARGET_COMPUTE_FRAME_LAYOUT bpf_compute_frame_layout
/* Expand to the instructions in a function prologue.  This function
   is called when expanding the 'prologue' pattern in bpf.md.  */

void
bpf_expand_prologue (void)
{
  /* The BPF "hardware" provides a fresh new set of registers for each
     called function, some of which are initialized to the values of
     the arguments passed in the first five registers.  In doing so,
     it saves the values of the registers of the caller, and restores
     them upon returning.  Therefore, there is no need to save the
     callee-saved registers here.  In fact, the kernel implementation
     refuses to run programs in which registers are referred before
     being initialized.  */

  /* BPF does not support functions that allocate stack space
     dynamically.  This should have been checked already and an error
     emitted.  */
  gcc_assert (!cfun->calls_alloca);

  /* If we ever need to have a proper prologue here, please mind the
     `naked' function attribute.  */
}
/* Expand to the instructions in a function epilogue.  This function
   is called when expanding the 'epilogue' pattern in bpf.md.  */

void
bpf_expand_epilogue (void)
{
  /* See note in bpf_expand_prologue for an explanation on why we are
     not restoring callee-saved registers in BPF.  */

  /* If we ever need to do anything else than just generating a return
     instruction here, please mind the `naked' function attribute.  */

  emit_jump_insn (gen_exit ());
}
/* Expand to the instructions for a conditional branch.  This function
   is called when expanding the 'cbranch<mode>4' pattern in bpf.md.  */

void
bpf_expand_cbranch (machine_mode mode, rtx *operands)
{
  /* If all jump instructions are available, nothing special to do here.  */
  if (bpf_has_jmpext)
    return;

  enum rtx_code code = GET_CODE (operands[0]);

  /* Without the conditional branch instructions jslt, jsle, jlt, jle, we need
     to convert conditional branches that would use them to an available
     operation instead by reversing the comparison.  */
  if ((code == LT || code == LE || code == LTU || code == LEU))
    {
      /* Reverse the condition.  */
      PUT_CODE (operands[0], reverse_condition (code));
      /* NOTE(review): the operands are swapped below, and the
	 operand-swap counterpart of LT is GT (swap_condition), whereas
	 reverse_condition maps LT -> GE.  Confirm against the original
	 file and bpf.md that reverse_condition is really intended
	 here.  */

      /* Swap the operands, and ensure that the first is a register.  */
      if (!register_operand (operands[2], mode))
	operands[2] = force_reg (mode, operands[2]);

      rtx tmp = operands[1];
      operands[1] = operands[2];
      operands[2] = tmp;
    }
}
/* Return the initial difference between the specified pair of
   registers.  The registers that can figure in FROM, and TO, are
   specified by ELIMINABLE_REGS in bpf.h.

   This function is used in the definition of
   INITIAL_ELIMINATION_OFFSET in bpf.h */

int
bpf_initial_elimination_offset (int from, int to)
{
  int ret;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    /* The arg pointer and the frame pointer coincide in BPF.  */
    ret = 0;
  else if (from == STACK_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    /* Locals sit immediately below the frame pointer; see
       bpf_compute_frame_layout for how local_vars_size is derived.  */
    ret = -(cfun->machine->local_vars_size);
  else
    gcc_unreachable ();

  return ret;
}
484 /* Return the number of consecutive hard registers, starting at
485 register number REGNO, required to hold a value of mode MODE. */
488 bpf_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED
,
489 enum machine_mode mode
)
491 return CEIL (GET_MODE_SIZE (mode
), UNITS_PER_WORD
);
494 #undef TARGET_HARD_REGNO_NREGS
495 #define TARGET_HARD_REGNO_NREGS bpf_hard_regno_nregs
/* Return true if it is permissible to store a value of mode MODE in
   hard register number REGNO, or in several registers starting with
   that one.  */

static bool
bpf_hard_regno_mode_ok (unsigned int regno ATTRIBUTE_UNUSED,
			enum machine_mode mode)
{
  /* NOTE(review): the body of this function is missing from the
     extraction.  All BPF registers are general-purpose, so presumably
     any machine mode the backend supports is acceptable in any
     register -- confirm the exact mode set against the original
     file.  */
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
    case E_SImode:
    case E_DImode:
    case E_TImode:
    case E_SFmode:
    case E_DFmode:
      return true;
    default:
      return false;
    }
}
520 #undef TARGET_HARD_REGNO_MODE_OK
521 #define TARGET_HARD_REGNO_MODE_OK bpf_hard_regno_mode_ok
/* Return true if a function must have and use a frame pointer.  */

static bool
bpf_frame_pointer_required (void)
{
  /* We do not have a stack pointer, so we absolutely depend on the
     frame-pointer in order to access the stack... and fishes walk and
     pigs fly.  */
  return true;
}
/* Return `true' if the given RTX X is a valid base for an indirect
   memory access.  STRICT has the same meaning than in
   bpf_legitimate_address_p: under strict checking only the eleven
   physical registers (r0..r10) qualify; otherwise pseudo registers
   are accepted too.  */

static bool
bpf_address_base_p (rtx x, bool strict)
{
  return (GET_CODE (x) == REG
	  && (REGNO (x) < 11
	      || (!strict
		  && REGNO (x) >= FIRST_PSEUDO_REGISTER)));
}
/* Return true if X (a RTX) is a legitimate memory address on the
   target machine for a memory operand of mode MODE.  */

static bool
bpf_legitimate_address_p (machine_mode mode,
			  rtx x,
			  bool strict,
			  code_helper = ERROR_MARK)
{
  switch (GET_CODE (x))
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
      /* Symbolic addresses are only usable as call targets.  */
      return (mode == FUNCTION_MODE);

    case REG:
      return bpf_address_base_p (x, strict);

    case PLUS:
      {
	/* Accept (PLUS ADDR_BASE CONST_INT), provided CONST_INT fits
	   in the signed 16-bit offset field of the load/store
	   instructions.

	   Note that LABEL_REF and SYMBOL_REF are not allowed in
	   REG+IMM addresses, because it is almost certain they will
	   overload the offset field.  */

	rtx x0 = XEXP (x, 0);
	rtx x1 = XEXP (x, 1);

	if (bpf_address_base_p (x0, strict) && GET_CODE (x1) == CONST_INT)
	  return IN_RANGE (INTVAL (x1), -1 - 0x7fff, 0x7fff);

	break;
      }

    default:
      break;
    }

  return false;
}
590 #undef TARGET_LEGITIMATE_ADDRESS_P
591 #define TARGET_LEGITIMATE_ADDRESS_P bpf_legitimate_address_p
/* Describe the relative costs of RTL expressions.  Return true when
   all subexpressions of X have been processed, and false when
   `rtx_cost' should recurse.  */

static bool
bpf_rtx_costs (rtx x ATTRIBUTE_UNUSED,
	       enum machine_mode mode ATTRIBUTE_UNUSED,
	       int outer_code ATTRIBUTE_UNUSED,
	       int opno ATTRIBUTE_UNUSED,
	       int *total ATTRIBUTE_UNUSED,
	       bool speed ATTRIBUTE_UNUSED)
{
  /* NOTE(review): the body of this function is missing from the
     extraction.  All parameters are marked unused, which suggests a
     placeholder that simply lets rtx_cost recurse -- confirm against
     the original file.  */
  return false;
}
609 #undef TARGET_RTX_COSTS
610 #define TARGET_RTX_COSTS bpf_rtx_costs
/* Return true if an argument at the position indicated by CUM should
   be passed by reference.  If the hook returns true, a copy of that
   argument is made in memory and a pointer to the argument is passed
   instead of the argument itself.  */

static bool
bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
		       const function_arg_info &arg)
{
  unsigned num_bytes = arg.type_size_in_bytes ();

  /* Pass aggregates and values bigger than 5 words by reference.
     Everything else is passed by copy.  (5 words = the five 64-bit
     argument registers r1..r5.)  */
  return (arg.aggregate_type_p () || (num_bytes > 8*5));
}
628 #undef TARGET_PASS_BY_REFERENCE
629 #define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference
/* Return a RTX indicating whether a function argument is passed in a
   register and if so, which register.  Arguments go in r1..r5; *CUM
   counts the argument words already consumed.  */

static rtx
bpf_function_arg (cumulative_args_t ca, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  if (*cum < 5)
    /* Argument registers start at r1 (r0 is the return register).  */
    return gen_rtx_REG (arg.mode, *cum + 1);
  else
    /* An error will be emitted for this in
       bpf_function_arg_advance.  */
    return NULL_RTX;
}
647 #undef TARGET_FUNCTION_ARG
648 #define TARGET_FUNCTION_ARG bpf_function_arg
/* Update the summarizer variable pointed by CA to advance past an
   argument in the argument list.  *CUM is maintained in words; a
   diagnostic is emitted the first time the r1..r5 register budget is
   exceeded.  */

static void
bpf_function_arg_advance (cumulative_args_t ca,
			  const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
  unsigned num_bytes = arg.type_size_in_bytes ();
  unsigned num_words = CEIL (num_bytes, UNITS_PER_WORD);

  /* Diagnose only on the argument that crosses the 5-register
     boundary, not on every subsequent one.  */
  if (*cum <= 5 && *cum + num_words > 5)
    {
      /* Too many arguments for BPF.  However, if the function is
	 gonna be inline for sure, we let it pass.  Otherwise, issue
	 an error.  */
      if (!lookup_attribute ("always_inline",
			     DECL_ATTRIBUTES (cfun->decl)))
	error ("too many function arguments for eBPF");
    }

  *cum += num_words;
}
674 #undef TARGET_FUNCTION_ARG_ADVANCE
675 #define TARGET_FUNCTION_ARG_ADVANCE bpf_function_arg_advance
/* Output the assembly code for a constructor.  Since eBPF doesn't
   support indirect calls, constructors are not supported.  A located
   diagnostic is preferred when the symbol has an associated decl.  */

static void
bpf_output_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
	      "no constructors");
  else
    sorry ("no constructors");
}
692 #undef TARGET_ASM_CONSTRUCTOR
693 #define TARGET_ASM_CONSTRUCTOR bpf_output_constructor
/* Output the assembly code for a destructor.  Since eBPF doesn't
   support indirect calls, destructors are not supported.  A located
   diagnostic is preferred when the symbol has an associated decl.  */

static void
bpf_output_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
	      "no destructors");
  else
    sorry ("no destructors");
}
710 #undef TARGET_ASM_DESTRUCTOR
711 #define TARGET_ASM_DESTRUCTOR bpf_output_destructor
/* Return the appropriate instruction to CALL to a function.  TARGET
   is an RTX denoting the address of the called function.

   The main purposes of this function are:
   - To reject indirect CALL instructions, which are not supported by
     eBPF.
   - To recognize calls to kernel helper functions and emit the
     corresponding CALL N instruction.

   This function is called from the expansion of the 'call' pattern in
   bpf.md.  */

const char *
bpf_output_call (rtx target)
{
  rtx xops[1];

  switch (GET_CODE (target))
    {
    case CONST_INT:
      output_asm_insn ("call\t%0", &target);
      break;
    case SYMBOL_REF:
      {
	tree decl = SYMBOL_REF_DECL (target);
	tree attr;

	if (decl
	    && (attr = lookup_attribute ("kernel_helper",
					 DECL_ATTRIBUTES (decl))))
	  {
	    /* Emit `call N' where N is the helper number given as the
	       kernel_helper attribute argument.  */
	    tree attr_args = TREE_VALUE (attr);

	    xops[0] = GEN_INT (TREE_INT_CST_LOW (TREE_VALUE (attr_args)));
	    output_asm_insn ("call\t%0", xops);
	  }
	else
	  output_asm_insn ("call\t%0", &target);

	break;
      }
    default:
      if (TARGET_XBPF)
	output_asm_insn ("call\t%0", &target);
      else
	{
	  /* NOTE(review): "which are" is ungrammatical here ("an
	     indirect call ... which is"), but the string is kept
	     byte-for-byte since testsuite matching may depend on
	     it.  */
	  error ("indirect call in function, which are not supported by eBPF");
	  /* Emit a dummy call so assembly output keeps flowing after
	     the error.  */
	  output_asm_insn ("call 0", NULL);
	}
      break;
    }

  return "";
}
/* Print register name according to assembly dialect.  In normal
   syntax registers are printed like %rN where N is the register
   number.

   In pseudoc syntax, the register names do not feature a '%' prefix.
   Additionally, the code 'w' denotes that the register should be
   printed as wN instead of rN, where N is the register number, but
   only when the value stored in the operand OP is 32-bit wide.
   Finally, the code 'W' denotes that the register should be printed
   as wN instead of rN, in all cases, regardless of the mode of the
   value stored in the operand.  */

static void
bpf_print_register (FILE *file, rtx op, int code)
{
  if (asm_dialect == ASM_NORMAL)
    fprintf (file, "%s", reg_names[REGNO (op)]);
  else
    {
      if (code == 'W' || (code == 'w' && GET_MODE_SIZE (GET_MODE (op)) <= 4))
	{
	  if (REGNO (op) == BPF_FP)
	    fprintf (file, "w10");
	  else
	    /* Skip the "%r" prefix of the normal-syntax name.  */
	    fprintf (file, "w%s", reg_names[REGNO (op)]+2);
	}
      else
	{
	  if (REGNO (op) == BPF_FP)
	    fprintf (file, "r10");
	  else
	    /* Skip the "%" prefix of the normal-syntax name.  */
	    fprintf (file, "%s", reg_names[REGNO (op)]+1);
	}
    }
}
/* Print an instruction operand.  This function is called in the macro
   PRINT_OPERAND defined in bpf.h */

void
bpf_print_operand (FILE *file, rtx op, int code)
{
  switch (GET_CODE (op))
    {
    case REG:
      bpf_print_register (file, op, code);
      break;
    case MEM:
      output_address (GET_MODE (op), XEXP (op, 0));
      break;
    case CONST_DOUBLE:
      if (GET_MODE (op) == VOIDmode)
	{
	  /* A VOIDmode CONST_DOUBLE holds an integer wider than a
	     host wide int, split in HIGH/LOW halves.  */
	  if (CONST_DOUBLE_HIGH (op))
	    fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
		     CONST_DOUBLE_HIGH (op), CONST_DOUBLE_LOW (op));
	  else if (CONST_DOUBLE_LOW (op) < 0)
	    fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (op));
	  else
	    fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (op));
	}
      else
	{
	  /* A floating-point constant: emit its target bit pattern in
	     hexadecimal.  */
	  long vals[2];

	  real_to_target (vals, CONST_DOUBLE_REAL_VALUE (op), GET_MODE (op));
	  vals[0] &= 0xffffffff;
	  vals[1] &= 0xffffffff;

	  if (GET_MODE (op) == SFmode)
	    fprintf (file, "0x%08lx", vals[0]);
	  else if (GET_MODE (op) == DFmode)
	    {
	      /* Note: real_to_target puts vals in target word order.  */
	      if (WORDS_BIG_ENDIAN)
		fprintf (file, "0x%08lx%08lx", vals[0], vals[1]);
	      else
		fprintf (file, "0x%08lx%08lx", vals[1], vals[0]);
	    }
	  else
	    gcc_unreachable ();
	}
      break;
    default:
      output_addr_const (file, op);
    }
}
/* Print an operand which is an address.  This function should handle
   any legit address, as accepted by bpf_legitimate_address_p, and
   also addresses that are valid in CALL instructions.

   This function is called in the PRINT_OPERAND_ADDRESS macro defined
   in bpf.h.  In normal syntax addresses are bracketed ([reg+off]);
   in pseudoc syntax they are bare (reg+off).  */

static void
bpf_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case REG:
      if (asm_dialect == ASM_NORMAL)
	fprintf (file, "[");
      bpf_print_register (file, addr, 0);
      fprintf (file, asm_dialect == ASM_NORMAL ? "+0]" : "+0");
      break;
    case PLUS:
      {
	rtx op0 = XEXP (addr, 0);
	rtx op1 = XEXP (addr, 1);

	if (GET_CODE (op0) == REG && GET_CODE (op1) == CONST_INT)
	  {
	    if (asm_dialect == ASM_NORMAL)
	      fprintf (file, "[");
	    bpf_print_register (file, op0, 0);
	    fprintf (file, "+");
	    output_addr_const (file, op1);
	    if (asm_dialect == ASM_NORMAL)
	      fprintf (file, "]");
	  }
	else
	  fatal_insn ("invalid address in operand", addr);
	break;
      }
    case MEM:
      /* Fallthrough.  */
    case LABEL_REF:
      fatal_insn ("unsupported operand", addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}
/* Add a BPF builtin function with NAME, CODE and TYPE.  Return
   the function decl or NULL_TREE if the builtin was not added.
   The decl is also recorded in the bpf_builtins table, indexed by
   CODE, for later lookup at expansion time.  */

static tree
def_builtin (const char *name, enum bpf_builtins code, tree type)
{
  tree t
    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL);

  bpf_builtins[code] = t;

  return t;
}
/* Define machine-specific built-in functions.  Registers the
   BPF_LD_ABS/BPF_LD_IND load builtins and the CO-RE family of
   builtins, then hands off to bpf_init_core_builtins.  */

static void
bpf_init_builtins (void)
{
  tree ullt = long_long_unsigned_type_node;

  /* Built-ins for BPF_LD_ABS and BPF_LD_IND instructions.  */

  def_builtin ("__builtin_bpf_load_byte", BPF_BUILTIN_LOAD_BYTE,
	       build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_half", BPF_BUILTIN_LOAD_HALF,
	       build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_word", BPF_BUILTIN_LOAD_WORD,
	       build_function_type_list (ullt, ullt, 0));

  /* CO-RE builtins.  */
  def_builtin ("__builtin_preserve_access_index",
	       BPF_BUILTIN_PRESERVE_ACCESS_INDEX,
	       build_function_type_list (ptr_type_node, ptr_type_node, 0));
  def_builtin ("__builtin_preserve_field_info",
	       BPF_BUILTIN_PRESERVE_FIELD_INFO,
	       build_function_type_list (unsigned_type_node, ptr_type_node,
					 unsigned_type_node, 0));
  def_builtin ("__builtin_btf_type_id",
	       BPF_BUILTIN_BTF_TYPE_ID,
	       build_function_type_list (integer_type_node, ptr_type_node,
					 integer_type_node, 0));
  def_builtin ("__builtin_preserve_type_info",
	       BPF_BUILTIN_PRESERVE_TYPE_INFO,
	       build_function_type_list (integer_type_node, ptr_type_node,
					 integer_type_node, 0));
  def_builtin ("__builtin_preserve_enum_value",
	       BPF_BUILTIN_PRESERVE_ENUM_VALUE,
	       build_function_type_list (integer_type_node, ptr_type_node,
					 integer_type_node, integer_type_node,
					 0));

  def_builtin ("__builtin_core_reloc",
	       BPF_BUILTIN_CORE_RELOC,
	       build_function_type_list (integer_type_node, integer_type_node,
					 0));
  /* Mark the CO-RE relocation builtin pure so calls to it are not
     discarded or duplicated carelessly by the optimizers.  */
  DECL_PURE_P (bpf_builtins[BPF_BUILTIN_CORE_RELOC]) = 1;

  bpf_init_core_builtins ();
}
963 #undef TARGET_INIT_BUILTINS
964 #define TARGET_INIT_BUILTINS bpf_init_builtins
/* Expand a call to a BPF-specific built-in function that was set up
   with bpf_init_builtins.  */

static rtx
bpf_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
		    rtx subtarget ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED,
		    int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int code = DECL_MD_FUNCTION_CODE (fndecl);

  if (code == BPF_BUILTIN_LOAD_BYTE
      || code == BPF_BUILTIN_LOAD_HALF
      || code == BPF_BUILTIN_LOAD_WORD)
    {
      /* Expand an indirect load from the sk_buff in the context.
	 There is just one argument to the builtin, which is the
	 offset of the data to load.

	 We try first to expand a ldabs* instruction.  In case this
	 fails, we try a ldind* instruction.  */

      enum insn_code abs_icode
	= (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldabsb
	   : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldabsh
	   : CODE_FOR_ldabsw);

      enum insn_code ind_icode
	= (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldindb
	   : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldindh
	   : CODE_FOR_ldindw);

      tree offset_arg = CALL_EXPR_ARG (exp, 0);
      struct expand_operand ops[2];

      create_input_operand (&ops[0], expand_normal (offset_arg),
			    TYPE_MODE (TREE_TYPE (offset_arg)));
      create_input_operand (&ops[1], const0_rtx, SImode);

      if (!maybe_expand_insn (abs_icode, 2, ops)
	  && !maybe_expand_insn (ind_icode, 2, ops))
	{
	  error ("invalid argument to built-in function");
	  return gen_rtx_REG (ops[0].mode, BPF_R0);
	}

      /* The result of the load is in R0.  */
      return gen_rtx_REG (ops[0].mode, BPF_R0);
    }
  else
    {
      /* Defer to the CO-RE builtin expander for everything else.  */
      rtx ret = bpf_expand_core_builtin (exp, (enum bpf_builtins) code);
      if (ret != NULL_RTX)
	return ret;
    }

  error ("invalid built-in function at expansion");
  gcc_unreachable ();
}
1027 #undef TARGET_EXPAND_BUILTIN
1028 #define TARGET_EXPAND_BUILTIN bpf_expand_builtin
/* Implement TARGET_RESOLVE_OVERLOADED_BUILTIN.  Dispatch the CO-RE
   builtins (those with codes above BPF_CORE_BUILTINS_MARKER) to the
   CO-RE resolver; everything else needs no overload resolution.  */

static tree
bpf_resolve_overloaded_builtin (location_t loc, tree fndecl, void *arglist)
{
  int code = DECL_MD_FUNCTION_CODE (fndecl);

  if (code > BPF_CORE_BUILTINS_MARKER)
    return bpf_resolve_overloaded_core_builtin (loc, fndecl, arglist);
  else
    return NULL_TREE;
}
1040 #undef TARGET_RESOLVE_OVERLOADED_BUILTIN
1041 #define TARGET_RESOLVE_OVERLOADED_BUILTIN bpf_resolve_overloaded_builtin
/* Implement TARGET_DELEGITIMIZE_ADDRESS.  Strip the CO-RE relocation
   UNSPEC wrapper to recover the underlying address; any other RTL is
   returned unchanged.  */

static rtx
bpf_delegitimize_address (rtx rtl)
{
  if (GET_CODE (rtl) == UNSPEC
      && XINT (rtl, 1) == UNSPEC_CORE_RELOC)
    return XVECEXP (rtl, 0, 0);

  return rtl;
}
1054 #define TARGET_DELEGITIMIZE_ADDRESS bpf_delegitimize_address
/* Initialize target-specific function library calls.  This is mainly
   used to call library-provided soft-fp operations, since eBPF
   doesn't support floating-point in "hardware".  */

static void
bpf_init_libfuncs (void)
{
  /* Conversions between the two float modes and between float and
     integer are routed to __bpf_*-prefixed soft-float routines.  */
  set_conv_libfunc (sext_optab, DFmode, SFmode,
		    "__bpf_extendsfdf2");
  set_conv_libfunc (trunc_optab, SFmode, DFmode,
		    "__bpf_truncdfsf2");
  set_conv_libfunc (sfix_optab, SImode, DFmode,
		    "__bpf_fix_truncdfsi");
  set_conv_libfunc (sfloat_optab, DFmode, SImode,
		    "__bpf_floatsidf");
  set_conv_libfunc (ufloat_optab, DFmode, SImode,
		    "__bpf_floatunsidf");
}
1075 #undef TARGET_INIT_LIBFUNCS
1076 #define TARGET_INIT_LIBFUNCS bpf_init_libfuncs
/* Define the mechanism that will be used for describing frame unwind
   information to the debugger.  In eBPF it is not possible to unwind
   frames.  */

static enum unwind_info_type
bpf_debug_unwind_info ()
{
  /* NOTE(review): the body of this function is missing from the
     extraction; given the comment above, it presumably returns
     UI_NONE -- confirm against the original file.  */
  return UI_NONE;
}
1089 #define TARGET_DEBUG_UNWIND_INFO bpf_debug_unwind_info
1091 /* Output assembly directives to assemble data of various sized and
1094 #undef TARGET_ASM_BYTE_OP
1095 #define TARGET_ASM_BYTE_OP "\t.byte\t"
1096 #undef TARGET_ASM_ALIGNED_HI_OP
1097 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1098 #undef TARGET_ASM_ALIGNED_SI_OP
1099 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1100 #undef TARGET_ASM_ALIGNED_DI_OP
1101 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
/* Implement target hook small_register_classes_for_mode_p.  */

static bool
bpf_small_register_classes_for_mode_p (machine_mode mode)
{
  /* NOTE(review): lines between the signature and the return are
     missing from the extraction; an xBPF special case may precede
     this return -- confirm against the original file.  */

  /* Avoid putting function addresses in registers, as calling these
     is not supported in eBPF.  */
  return (mode != FUNCTION_MODE);
}
1116 #undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
1117 #define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
1118 bpf_small_register_classes_for_mode_p
1120 /* Finally, build the GCC target. */
1122 struct gcc_target targetm
= TARGET_INITIALIZER
;