// script.cc -- handle linker scripts for gold.

// Copyright 2006, 2007 Free Software Foundation, Inc.
// Written by Ian Lance Taylor <iant@google.com>.

// This file is part of gold.

// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
// MA 02110-1301, USA.
#include "gold.h"

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>

#include "options.h"
#include "fileread.h"
#include "workqueue.h"
#include "readsyms.h"
#include "yyscript.h"
#include "script.h"
#include "script-c.h"

namespace gold
{
// A token read from a script file.  We don't implement keywords here;
// all keywords are simply represented as a string.

class Token
{
 public:
  // Token classification.
  enum Classification
  {
    // Token is invalid.
    TOKEN_INVALID,
    // Token indicates end of input.
    TOKEN_EOF,
    // Token is a string of characters.
    TOKEN_STRING,
    // Token is an operator.
    TOKEN_OPERATOR,
    // Token is a number (an integer).
    TOKEN_INTEGER
  };

  // We need an empty constructor so that we can put this in STL
  // containers.
  Token()
    : classification_(TOKEN_INVALID), value_(), opcode_(0),
      lineno_(0), charpos_(0)
  { }

  // A general token with no value.
  Token(Classification classification, int lineno, int charpos)
    : classification_(classification), value_(), opcode_(0),
      lineno_(lineno), charpos_(charpos)
  {
    gold_assert(classification == TOKEN_INVALID
                || classification == TOKEN_EOF);
  }

  // A general token with a value.
  Token(Classification classification, const std::string& value,
        int lineno, int charpos)
    : classification_(classification), value_(value), opcode_(0),
      lineno_(lineno), charpos_(charpos)
  {
    gold_assert(classification != TOKEN_INVALID
                && classification != TOKEN_EOF);
  }

  // A token representing a string of characters.
  Token(const std::string& s, int lineno, int charpos)
    : classification_(TOKEN_STRING), value_(s), opcode_(0),
      lineno_(lineno), charpos_(charpos)
  { }

  // A token representing an operator.
  Token(int opcode, int lineno, int charpos)
    : classification_(TOKEN_OPERATOR), value_(), opcode_(opcode),
      lineno_(lineno), charpos_(charpos)
  { }

  // Return whether the token is invalid.
  bool
  is_invalid() const
  { return this->classification_ == TOKEN_INVALID; }

  // Return whether this is an EOF token.
  bool
  is_eof() const
  { return this->classification_ == TOKEN_EOF; }

  // Return the token classification.
  Classification
  classification() const
  { return this->classification_; }

  // Return the line number at which the token starts.
  int
  lineno() const
  { return this->lineno_; }

  // Return the character position at which the token starts.
  int
  charpos() const
  { return this->charpos_; }

  // Get the value of a token.

  const std::string&
  string_value() const
  {
    gold_assert(this->classification_ == TOKEN_STRING);
    return this->value_;
  }

  int
  operator_value() const
  {
    gold_assert(this->classification_ == TOKEN_OPERATOR);
    return this->opcode_;
  }

  int64_t
  integer_value() const
  {
    gold_assert(this->classification_ == TOKEN_INTEGER);
    return strtoll(this->value_.c_str(), NULL, 0);
  }

 private:
  // The token classification.
  Classification classification_;
  // The token value, for TOKEN_STRING or TOKEN_INTEGER.
  std::string value_;
  // The token value, for TOKEN_OPERATOR.
  int opcode_;
  // The line number where this token started (one based).
  int lineno_;
  // The character position within the line where this token started
  // (one based).
  int charpos_;
};
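
// A small illustration (hypothetical values): the lexer below creates
// integer tokens by classification, so
//
//   Token t(Token::TOKEN_INTEGER, "0x1000", 1, 1);
//
// satisfies the constructor assert, and t.integer_value() returns 4096,
// since strtoll() is called with base 0 and accepts the 0x prefix.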
// This class handles lexing a file into a sequence of tokens.  We
// don't expect linker scripts to be large, so we just read them and
// tokenize them all at once.

class Lex
{
 public:
  Lex(Input_file* input_file)
    : input_file_(input_file), tokens_()
  { }

  // Tokenize the file.  Return the final token, which will be either
  // an invalid token or an EOF token.  An invalid token indicates
  // that tokenization failed.
  Token
  tokenize();

  // A sequence of tokens.
  typedef std::vector<Token> Token_sequence;

  // Return the tokens.
  const Token_sequence&
  tokens() const
  { return this->tokens_; }

 private:
  Lex(const Lex&);
  Lex& operator=(const Lex&);

  // Read the file into a string buffer.
  void
  read_file(std::string*);

  // Make a general token with no value at the current location.
  Token
  make_token(Token::Classification c, const char* p) const
  { return Token(c, this->lineno_, p - this->linestart_ + 1); }

  // Make a general token with a value at the current location.
  Token
  make_token(Token::Classification c, const std::string& v, const char* p)
    const
  { return Token(c, v, this->lineno_, p - this->linestart_ + 1); }

  // Make an operator token at the current location.
  Token
  make_token(int opcode, const char* p) const
  { return Token(opcode, this->lineno_, p - this->linestart_ + 1); }

  // Make an invalid token at the current location.
  Token
  make_invalid_token(const char* p)
  { return this->make_token(Token::TOKEN_INVALID, p); }

  // Make an EOF token at the current location.
  Token
  make_eof_token(const char* p)
  { return this->make_token(Token::TOKEN_EOF, p); }

  // Return whether C can be the first character in a name.  C2 is the
  // next character, since we sometimes need that.
  static inline bool
  can_start_name(char c, char c2);

  // Return whether C can appear in a name which has already started.
  static inline bool
  can_continue_name(char c);

  // Return whether C, C2, C3 can start a hex number.
  static inline bool
  can_start_hex(char c, char c2, char c3);

  // Return whether C can appear in a hex number.
  static inline bool
  can_continue_hex(char c);

  // Return whether C can start a non-hex number.
  static inline bool
  can_start_number(char c);

  // Return whether C can appear in a non-hex number.
  static inline bool
  can_continue_number(char c)
  { return Lex::can_start_number(c); }

  // If C1 C2 C3 form a valid three character operator, return the
  // opcode.  Otherwise return 0.
  static inline int
  three_char_operator(char c1, char c2, char c3);

  // If C1 C2 form a valid two character operator, return the opcode.
  // Otherwise return 0.
  static inline int
  two_char_operator(char c1, char c2);

  // If C1 is a valid one character operator, return the opcode.
  // Otherwise return 0.
  static inline int
  one_char_operator(char c1);

  // Read the next token.
  Token
  get_token(const char**);

  // Skip a C style /* */ comment.  Return false if the comment did
  // not end.
  bool
  skip_c_comment(const char**);

  // Skip a line # comment.  Return false if there was no newline.
  bool
  skip_line_comment(const char**);

  // Build a token CLASSIFICATION from all characters that match
  // CAN_CONTINUE_FN.  The token starts at START.  Start matching from
  // MATCH.  Set *PP to the character following the token.
  Token
  gather_token(Token::Classification, bool (*can_continue_fn)(char),
               const char* start, const char* match, const char** pp);

  // Build a token from a quoted string.
  Token
  gather_quoted_string(const char** pp);

  // The file we are reading.
  Input_file* input_file_;
  // The token sequence we create.
  Token_sequence tokens_;
  // The current line number.
  int lineno_;
  // The start of the current line in the buffer.
  const char* linestart_;
};
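
// A short usage sketch (hypothetical; "input_file" stands for an
// already opened Input_file*):
//
//   Lex lex(input_file);
//   Token last = lex.tokenize();
//   if (last.is_invalid())
//     ;  // lexing failed; report an error against last.lineno().
//   else
//     ;  // hand lex.tokens() to the parser via a Parser_closure.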
// Read the whole file into memory.  We don't expect linker scripts to
// be large, so we just use a std::string as a buffer.  We ignore the
// data we've already read, so that we read aligned buffers.

void
Lex::read_file(std::string* contents)
{
  off_t filesize = this->input_file_->file().filesize();
  contents->clear();
  contents->reserve(filesize);

  off_t off = 0;
  unsigned char buf[BUFSIZ];
  while (off < filesize)
    {
      off_t get = BUFSIZ;
      if (get > filesize - off)
        get = filesize - off;
      this->input_file_->file().read(off, get, buf);
      contents->append(reinterpret_cast<char*>(&buf[0]), get);
      off += get;
    }
}
// Return whether C can be the start of a name, if the next character
// is C2.  A name can begin with a letter, underscore, period, or
// dollar sign.  Because a name can be a file name, we also permit
// forward slash, backslash, and tilde.  Tilde is the tricky case
// here; GNU ld also uses it as a bitwise not operator.  It is only
// recognized as the operator if it is not immediately followed by
// some character which can appear in a symbol.  That is, "~0" is a
// symbol name, and "~ 0" is an expression using bitwise not.  We are
// compatible.

inline bool
Lex::can_start_name(char c, char c2)
{
  switch (c)
    {
    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
    case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
    case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
    case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
    case 'Y': case 'Z':
    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
    case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
    case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
    case 's': case 't': case 'u': case 'v': case 'w': case 'x':
    case 'y': case 'z':
    case '_': case '.': case '$': case '/': case '\\':
      return true;

    case '~':
      return can_continue_name(c2);

    default:
      return false;
    }
}
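
// For example (hypothetical input): given the text "~0", can_start_name
// sees c == '~' and c2 == '0'; since '0' can continue a name, "~0" lexes
// as a single symbol name.  Given "~ 0", c2 == ' ' cannot continue a
// name, so '~' falls through to the operator rules and is read as
// bitwise not.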
// Return whether C can continue a name which has already started.
// Subsequent characters in a name are the same as the leading
// characters, plus digits and "=+-:[],?*".  So in general the linker
// script language requires spaces around operators.

inline bool
Lex::can_continue_name(char c)
{
  switch (c)
    {
    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
    case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
    case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
    case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
    case 'Y': case 'Z':
    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
    case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
    case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
    case 's': case 't': case 'u': case 'v': case 'w': case 'x':
    case 'y': case 'z':
    case '_': case '.': case '$': case '/': case '\\':
    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
    case '=': case '+': case '-': case ':': case '[': case ']':
    case ',': case '?': case '*':
      return true;

    default:
      return false;
    }
}
// For a number we accept 0x followed by hex digits, or any sequence
// of digits.  The old linker accepts leading '$' for hex, and
// trailing HXBOD.  Those are for MRI compatibility and we don't
// accept them.  The old linker also accepts trailing MK for mega or
// kilo.  Those are mentioned in the documentation, and we accept
// them.

// Return whether C1 C2 C3 can start a hex number.

inline bool
Lex::can_start_hex(char c1, char c2, char c3)
{
  if (c1 == '0' && (c2 == 'x' || c2 == 'X'))
    return Lex::can_continue_hex(c3);
  return false;
}
// Return whether C can appear in a hex number.

inline bool
Lex::can_continue_hex(char c)
{
  switch (c)
    {
    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
    case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
    case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
      return true;

    default:
      return false;
    }
}
// Return whether C can start a non-hex number.

inline bool
Lex::can_start_number(char c)
{
  switch (c)
    {
    case '0': case '1': case '2': case '3': case '4':
    case '5': case '6': case '7': case '8': case '9':
      return true;

    default:
      return false;
    }
}
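
// For example (hypothetical inputs): "0x1000" and "4096" both lex as
// TOKEN_INTEGER; can_start_hex matches the first and can_start_number
// the second.  An MRI-style "$1000" is not treated as hex here; since
// '$' may start a name, it lexes as an ordinary TOKEN_STRING instead.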
// If C1 C2 C3 form a valid three character operator, return the
// opcode (defined in the yyscript.h file generated from yyscript.y).
// Otherwise return 0.

inline int
Lex::three_char_operator(char c1, char c2, char c3)
{
  switch (c1)
    {
    case '<':
      if (c2 == '<' && c3 == '=')
        return LSHIFTEQ;
      break;
    case '>':
      if (c2 == '>' && c3 == '=')
        return RSHIFTEQ;
      break;
    default:
      break;
    }
  return 0;
}
// If C1 C2 form a valid two character operator, return the opcode
// (defined in the yyscript.h file generated from yyscript.y).
// Otherwise return 0.

inline int
Lex::two_char_operator(char c1, char c2)

// If C1 is a valid one character operator, return the opcode.
// Otherwise return 0.

inline int
Lex::one_char_operator(char c1)
// Skip a C style comment.  *PP points to just after the "/*".  Return
// false if the comment did not end.

bool
Lex::skip_c_comment(const char** pp)
{
  const char* p = *pp;
  while (p[0] != '*' || p[1] != '/')
    {
      if (*p == '\0')
        {
          *pp = p;
          return false;
        }

      if (*p == '\n')
        {
          ++this->lineno_;
          this->linestart_ = p + 1;
        }
      ++p;
    }

  *pp = p + 2;
  return true;
}
// Skip a line # comment.  Return false if there was no newline.

bool
Lex::skip_line_comment(const char** pp)
{
  const char* p = *pp;
  size_t skip = strcspn(p, "\n");
  if (p[skip] == '\0')
    {
      *pp = p + skip;
      return false;
    }

  p += skip + 1;
  ++this->lineno_;
  this->linestart_ = p;
  *pp = p;

  return true;
}
// Build a token CLASSIFICATION from all characters that match
// CAN_CONTINUE_FN.  Update *PP.

inline Token
Lex::gather_token(Token::Classification classification,
                  bool (*can_continue_fn)(char),
                  const char* start,
                  const char* match,
                  const char** pp)
{
  while ((*can_continue_fn)(*match))
    ++match;
  *pp = match;
  return this->make_token(classification,
                          std::string(start, match - start),
                          start);
}
// Build a token from a quoted string.

Token
Lex::gather_quoted_string(const char** pp)
{
  const char* start = *pp;
  const char* p = start;
  ++p;
  size_t skip = strcspn(p, "\"\n");
  if (p[skip] != '"')
    return this->make_invalid_token(start);
  *pp = p + skip + 1;
  return this->make_token(Token::TOKEN_STRING,
                          std::string(p, skip),
                          start);
}
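
// For example (hypothetical input): the text "foo bar" in double quotes
// becomes a single TOKEN_STRING whose value is foo bar, spaces and all;
// if the closing quote is missing before the end of the line, the result
// is an invalid token and tokenization stops.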
// Return the next token at *PP.  Update *PP.  General guideline: we
// require linker scripts to be simple ASCII.  No unicode linker
// scripts.  In particular we can assume that any '\0' is the end of
// the input.

Token
Lex::get_token(const char** pp)
{
  const char* p = *pp;

  while (true)
    {
      if (*p == '\0')
        {
          *pp = p;
          return this->make_eof_token(p);
        }

      // Skip whitespace quickly.
      while (*p == ' ' || *p == '\t')
        ++p;

      if (*p == '\n')
        {
          ++p;
          ++this->lineno_;
          this->linestart_ = p;
          continue;
        }

      // Skip C style comments.
      if (p[0] == '/' && p[1] == '*')
        {
          int lineno = this->lineno_;
          int charpos = p - this->linestart_ + 1;

          *pp = p + 2;
          if (!this->skip_c_comment(pp))
            return Token(Token::TOKEN_INVALID, lineno, charpos);
          p = *pp;

          continue;
        }

      // Skip line comments.
      if (*p == '#')
        {
          *pp = p + 1;
          if (!this->skip_line_comment(pp))
            return this->make_eof_token(p);
          p = *pp;

          continue;
        }

      // Check for a name.
      if (Lex::can_start_name(p[0], p[1]))
        return this->gather_token(Token::TOKEN_STRING,
                                  Lex::can_continue_name,
                                  p, p + 1, pp);

      // We accept any arbitrary name in double quotes, as long as it
      // does not cross a line boundary.
      if (*p == '"')
        {
          *pp = p;
          return this->gather_quoted_string(pp);
        }

      // Check for a number.

      if (Lex::can_start_hex(p[0], p[1], p[2]))
        return this->gather_token(Token::TOKEN_INTEGER,
                                  Lex::can_continue_hex,
                                  p, p + 3, pp);

      if (Lex::can_start_number(p[0]))
        return this->gather_token(Token::TOKEN_INTEGER,
                                  Lex::can_continue_number,
                                  p, p + 1, pp);

      // Check for operators.

      int opcode = Lex::three_char_operator(p[0], p[1], p[2]);
      if (opcode != 0)
        {
          *pp = p + 3;
          return this->make_token(opcode, p);
        }

      opcode = Lex::two_char_operator(p[0], p[1]);
      if (opcode != 0)
        {
          *pp = p + 2;
          return this->make_token(opcode, p);
        }

      opcode = Lex::one_char_operator(p[0]);
      if (opcode != 0)
        {
          *pp = p + 1;
          return this->make_token(opcode, p);
        }

      return this->make_token(Token::TOKEN_INVALID, p);
    }
}
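
// As an illustration (hypothetical script text), the input
//
//   SEARCH_DIR("/usr/local/lib"); # local libraries
//
// tokenizes as the name SEARCH_DIR, an operator '(', the quoted string
// /usr/local/lib, an operator ')', an operator ';', and then EOF; the
// trailing # comment is skipped entirely.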
// Tokenize the file.  Return the final token.

Token
Lex::tokenize()
{
  std::string contents;
  this->read_file(&contents);

  const char* p = contents.c_str();

  this->lineno_ = 1;
  this->linestart_ = p;

  while (true)
    {
      Token t(this->get_token(&p));

      // Don't let an early null byte fool us into thinking that we've
      // reached the end of the file.
      if (t.is_eof()
          && static_cast<size_t>(p - contents.c_str()) < contents.length())
        t = this->make_invalid_token(p);

      if (t.is_invalid() || t.is_eof())
        return t;

      this->tokens_.push_back(t);
    }
}
// A trivial task which waits for THIS_BLOCKER to be clear and then
// clears NEXT_BLOCKER.  THIS_BLOCKER may be NULL.

class Script_unblock : public Task
{
 public:
  Script_unblock(Task_token* this_blocker, Task_token* next_blocker)
    : this_blocker_(this_blocker), next_blocker_(next_blocker)
  { }

  ~Script_unblock()
  {
    if (this->this_blocker_ != NULL)
      delete this->this_blocker_;
  }

  Is_runnable_type
  is_runnable(Workqueue*)
  {
    if (this->this_blocker_ != NULL && this->this_blocker_->is_blocked())
      return IS_BLOCKED;
    return IS_RUNNABLE;
  }

  Task_locker*
  locks(Workqueue* workqueue)
  {
    return new Task_locker_block(*this->next_blocker_, workqueue);
  }

  void
  run(Workqueue*)
  { }

 private:
  Task_token* this_blocker_;
  Task_token* next_blocker_;
};
// This class holds data passed through the parser to the lexer and to
// the parser support functions.  This avoids global variables.  We
// can't use global variables because we need not be called in the
// main thread.

class Parser_closure
{
 public:
  Parser_closure(const char* filename,
                 const Position_dependent_options& posdep_options,
                 bool in_group,
                 const Lex::Token_sequence* tokens)
    : filename_(filename), posdep_options_(posdep_options),
      in_group_(in_group), tokens_(tokens),
      next_token_index_(0), inputs_(NULL)
  { }

  // Return the file name.
  const char*
  filename() const
  { return this->filename_; }

  // Return the position dependent options.  The caller may modify
  // this.
  Position_dependent_options&
  position_dependent_options()
  { return this->posdep_options_; }

  // Return whether this script is being run in a group.
  bool
  in_group() const
  { return this->in_group_; }

  // Whether we are at the end of the token list.
  bool
  at_eof() const
  { return this->next_token_index_ >= this->tokens_->size(); }

  // Return the next token.
  const Token*
  next_token()
  {
    const Token* ret = &(*this->tokens_)[this->next_token_index_];
    ++this->next_token_index_;
    return ret;
  }

  // Return the list of input files, creating it if necessary.  This
  // is a space leak--we never free the INPUTS_ pointer.
  Input_arguments*
  inputs()
  {
    if (this->inputs_ == NULL)
      this->inputs_ = new Input_arguments();
    return this->inputs_;
  }

  // Return whether we saw any input files.
  bool
  saw_inputs() const
  { return this->inputs_ != NULL && !this->inputs_->empty(); }

 private:
  // The name of the file we are reading.
  const char* filename_;
  // The position dependent options.
  Position_dependent_options posdep_options_;
  // Whether we are currently in a --start-group/--end-group.
  bool in_group_;
  // The tokens to be returned by the lexer.
  const Lex::Token_sequence* tokens_;
  // The index of the next token to return.
  unsigned int next_token_index_;
  // New input files found to add to the link.
  Input_arguments* inputs_;
};
// FILE was found as an argument on the command line.  Try to read it
// as a script.  We've already read BYTES of data into P, but we
// ignore that.  Return true if the file was handled.

bool
read_input_script(Workqueue* workqueue, const General_options& options,
                  Symbol_table* symtab, Layout* layout,
                  const Dirsearch& dirsearch, Input_objects* input_objects,
                  Input_group* input_group,
                  const Input_argument* input_argument,
                  Input_file* input_file, const unsigned char*, off_t,
                  Task_token* this_blocker, Task_token* next_blocker)
{
  Lex lex(input_file);
  if (lex.tokenize().is_invalid())
    return false;

  Parser_closure closure(input_file->filename().c_str(),
                         input_argument->file().options(),
                         input_group != NULL,
                         &lex.tokens());

  if (yyparse(&closure) != 0)
    return false;

  // THIS_BLOCKER must be clear before we may add anything to the
  // symbol table.  We are responsible for unblocking NEXT_BLOCKER
  // when we are done.  We are responsible for deleting THIS_BLOCKER
  // when it is unblocked.

  if (!closure.saw_inputs())
    {
      // The script did not add any files to read.  Note that we are
      // not permitted to call NEXT_BLOCKER->unblock() here even if
      // THIS_BLOCKER is NULL, as we are not in the main thread.
      workqueue->queue(new Script_unblock(this_blocker, next_blocker));
      return true;
    }

  for (Input_arguments::const_iterator p = closure.inputs()->begin();
       p != closure.inputs()->end();
       ++p)
    {
      Task_token* nb;
      if (p + 1 == closure.inputs()->end())
        nb = next_blocker;
      else
        {
          nb = new Task_token();
          nb->add_blocker();
        }
      workqueue->queue(new Read_symbols(options, input_objects, symtab,
                                        layout, dirsearch, &*p,
                                        input_group, this_blocker, nb));
      this_blocker = nb;
    }

  return true;
}
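
// To sketch the blocker chain (hypothetical script naming a.o, b.o and
// c.o): Read_symbols(a.o) waits on THIS_BLOCKER and blocks a fresh token
// t1, Read_symbols(b.o) waits on t1 and blocks t2, and Read_symbols(c.o)
// waits on t2 and blocks NEXT_BLOCKER.  The files named by the script
// are therefore processed in order, and later tasks still wait on the
// original NEXT_BLOCKER.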
// Manage mapping from keywords to the codes expected by the bison
// parser.

class Keyword_to_parsecode
{
 public:
  // The structure which maps keywords to parsecodes.
  struct Keyword_parsecode
  {
    // Keyword.
    const char* keyword;
    // Corresponding parsecode.
    int parsecode;
  };

  // Return the parsecode corresponding to KEYWORD, or 0 if it is not
  // a keyword.
  static int
  keyword_to_parsecode(const char* keyword);

 private:
  // The array of all keywords.
  static const Keyword_parsecode keyword_parsecodes_[];

  // The number of keywords.
  static const int keyword_count;
};
// Mapping from keyword string to keyword parsecode.  This array must
// be kept in sorted order.  Parsecodes are looked up using bsearch.
// This array must correspond to the list of parsecodes in yyscript.y.

const Keyword_to_parsecode::Keyword_parsecode
Keyword_to_parsecode::keyword_parsecodes_[] =
{
  { "ABSOLUTE", ABSOLUTE },
  { "ALIGN", ALIGN_K },
  { "ASSERT", ASSERT_K },
  { "AS_NEEDED", AS_NEEDED },
  { "CONSTANT", CONSTANT },
  { "CONSTRUCTORS", CONSTRUCTORS },
  { "CREATE_OBJECT_SYMBOLS", CREATE_OBJECT_SYMBOLS },
  { "DATA_SEGMENT_ALIGN", DATA_SEGMENT_ALIGN },
  { "DATA_SEGMENT_END", DATA_SEGMENT_END },
  { "DATA_SEGMENT_RELRO_END", DATA_SEGMENT_RELRO_END },
  { "DEFINED", DEFINED },
  { "EXCLUDE_FILE", EXCLUDE_FILE },
  { "EXTERN", EXTERN },
  { "FORCE_COMMON_ALLOCATION", FORCE_COMMON_ALLOCATION },
  { "INCLUDE", INCLUDE },
  { "INHIBIT_COMMON_ALLOCATION", INHIBIT_COMMON_ALLOCATION },
  { "LENGTH", LENGTH },
  { "LOADADDR", LOADADDR },
  { "MEMORY", MEMORY },
  { "NOCROSSREFS", NOCROSSREFS },
  { "NOFLOAT", NOFLOAT },
  { "NOLOAD", NOLOAD },
  { "ONLY_IF_RO", ONLY_IF_RO },
  { "ONLY_IF_RW", ONLY_IF_RW },
  { "ORIGIN", ORIGIN },
  { "OUTPUT", OUTPUT },
  { "OUTPUT_ARCH", OUTPUT_ARCH },
  { "OUTPUT_FORMAT", OUTPUT_FORMAT },
  { "OVERLAY", OVERLAY },
  { "PROVIDE", PROVIDE },
  { "PROVIDE_HIDDEN", PROVIDE_HIDDEN },
  { "SEARCH_DIR", SEARCH_DIR },
  { "SECTIONS", SECTIONS },
  { "SEGMENT_START", SEGMENT_START },
  { "SIZEOF", SIZEOF },
  { "SIZEOF_HEADERS", SIZEOF_HEADERS },
  { "SORT_BY_ALIGNMENT", SORT_BY_ALIGNMENT },
  { "SORT_BY_NAME", SORT_BY_NAME },
  { "SPECIAL", SPECIAL },
  { "STARTUP", STARTUP },
  { "SUBALIGN", SUBALIGN },
  { "SYSLIB", SYSLIB },
  { "TARGET", TARGET_K },
  { "TRUNCATE", TRUNCATE },
  { "VERSION", VERSIONK },
  { "global", GLOBAL },
  { "sizeof_headers", SIZEOF_HEADERS },
};

const int Keyword_to_parsecode::keyword_count =
  (sizeof(Keyword_to_parsecode::keyword_parsecodes_)
   / sizeof(Keyword_to_parsecode::keyword_parsecodes_[0]));
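
// Usage example (hypothetical names): lookups are exact and
// case-sensitive, so keyword_to_parsecode("SECTIONS") yields the
// SECTIONS parsecode, while an ordinary name such as "mysym" yields 0
// and is handed to the parser as a plain string by yylex below.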
// Comparison function passed to bsearch.

extern "C"
{

static int
ktt_compare(const void* keyv, const void* kttv)
{
  const char* key = static_cast<const char*>(keyv);
  const Keyword_to_parsecode::Keyword_parsecode* ktt =
    static_cast<const Keyword_to_parsecode::Keyword_parsecode*>(kttv);
  return strcmp(key, ktt->keyword);
}

} // End extern "C".
int
Keyword_to_parsecode::keyword_to_parsecode(const char* keyword)
{
  void* kttv = bsearch(keyword,
                       Keyword_to_parsecode::keyword_parsecodes_,
                       Keyword_to_parsecode::keyword_count,
                       sizeof(Keyword_to_parsecode::keyword_parsecodes_[0]),
                       ktt_compare);
  if (kttv == NULL)
    return 0;
  Keyword_parsecode* ktt = static_cast<Keyword_parsecode*>(kttv);
  return ktt->parsecode;
}

} // End namespace gold.
// The remaining functions are extern "C", so it's clearer not to put
// them in namespace gold.

using namespace gold;

// This function is called by the bison parser to return the next
// token.

extern "C" int
yylex(YYSTYPE* lvalp, void* closurev)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);

  if (closure->at_eof())
    return 0;

  const Token* token = closure->next_token();

  switch (token->classification())
    {
    default:
    case Token::TOKEN_INVALID:
    case Token::TOKEN_EOF:
      gold_unreachable();

    case Token::TOKEN_STRING:
      {
        const char* str = token->string_value().c_str();
        int parsecode = Keyword_to_parsecode::keyword_to_parsecode(str);
        if (parsecode != 0)
          return parsecode;
        lvalp->string = str;
        return STRING;
      }

    case Token::TOKEN_OPERATOR:
      return token->operator_value();

    case Token::TOKEN_INTEGER:
      lvalp->integer = token->integer_value();
      return INTEGER;
    }
}
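
// For example (hypothetical tokens): when the lexer hands yylex the name
// "OUTPUT_FORMAT", the keyword table supplies the OUTPUT_FORMAT
// parsecode; when it hands over "elf64-x86-64", no keyword matches, so
// yylex sets lvalp->string and returns the generic STRING parsecode.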
// This function is called by the bison parser to report an error.

extern "C" void
yyerror(void* closurev, const char* message)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);

  fprintf(stderr, _("%s: %s: %s\n"),
          program_name, closure->filename(), message);
}
// Called by the bison parser to add a file to the link.

extern "C" void
script_add_file(void* closurev, const char* name)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);
  std::string absname;
  if (name[0] == '/')
    {
      absname = name;
    }
  else
    {
      // Prepend `dirname closure->filename()` to make the path absolute.
      char *slash = strrchr(closure->filename(), '/');
      absname.assign(closure->filename(),
                     slash ? slash - closure->filename() + 1 : 0);
      absname += name;
    }
  Input_file_argument file(absname.c_str(), false,
                           closure->position_dependent_options());
  closure->inputs()->add_file(file);
}
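
// For example (hypothetical paths): if the script /tmp/project/extra.t
// contains GROUP(libfoo.a), the relative name is resolved against the
// script's own directory and "/tmp/project/libfoo.a" is added to the
// input list, while an absolute name such as "/usr/lib/crt1.o" is added
// unchanged.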
// Called by the bison parser to start a group.  If we are already in
// a group, that means that this script was invoked within a
// --start-group --end-group sequence on the command line, or that
// this script was found in a GROUP of another script.  In that case,
// we simply continue the existing group, rather than starting a new
// one.  It is possible to construct a case in which this will do
// something other than what would happen if we did a recursive group,
// but it's hard to imagine why the different behaviour would be
// useful for a real program.  Avoiding recursive groups is simpler
// and more efficient.

extern "C" void
script_start_group(void* closurev)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);
  if (!closure->in_group())
    closure->inputs()->start_group();
}

// Called by the bison parser at the end of a group.

extern "C" void
script_end_group(void* closurev)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);
  if (!closure->in_group())
    closure->inputs()->end_group();
}
// Called by the bison parser to start an AS_NEEDED list.

extern "C" void
script_start_as_needed(void* closurev)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);
  closure->position_dependent_options().set_as_needed();
}

// Called by the bison parser at the end of an AS_NEEDED list.

extern "C" void
script_end_as_needed(void* closurev)
{
  Parser_closure* closure = static_cast<Parser_closure*>(closurev);
  closure->position_dependent_options().clear_as_needed();
}