libgo: update to Go 1.11
libgo/go/go/parser/parser.go
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package parser implements a parser for Go source files. Input may be
6 // provided in a variety of forms (see the various Parse* functions); the
7 // output is an abstract syntax tree (AST) representing the Go source. The
8 // parser is invoked through one of the Parse* functions.
9 //
10 // The parser accepts a larger language than is syntactically permitted by
11 // the Go spec, for simplicity, and for improved robustness in the presence
12 // of syntax errors. For instance, in method declarations, the receiver is
13 // treated like an ordinary parameter list and thus may contain multiple
14 // entries where the spec permits exactly one. Consequently, the corresponding
15 // field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
17 package parser
19 import (
20 "fmt"
21 "go/ast"
22 "go/scanner"
23 "go/token"
24 "strconv"
25 "strings"
26 "unicode"
29 // The parser structure holds the parser's internal state.
30 type parser struct {
31 file *token.File
32 errors scanner.ErrorList
33 scanner scanner.Scanner
35 // Tracing/debugging
36 mode Mode // parsing mode
37 trace bool // == (mode & Trace != 0)
38 indent int // indentation used for tracing output
40 // Comments
41 comments []*ast.CommentGroup
42 leadComment *ast.CommentGroup // last lead comment
43 lineComment *ast.CommentGroup // last line comment
45 // Next token
46 pos token.Pos // token position
47 tok token.Token // one token look-ahead
48 lit string // token literal
50 // Error recovery
51 // (used to limit the number of calls to parser.advance
52 // w/o making scanning progress - avoids potential endless
53 // loops across multiple parser functions during error recovery)
54 syncPos token.Pos // last synchronization position
55 syncCnt int // number of parser.advance calls without progress
57 // Non-syntactic parser control
58 exprLev int // < 0: in control clause, >= 0: in expression
59 inRhs bool // if set, the parser is parsing a rhs expression
61 // Ordinary identifier scopes
62 pkgScope *ast.Scope // pkgScope.Outer == nil
63 topScope *ast.Scope // top-most scope; may be pkgScope
64 unresolved []*ast.Ident // unresolved identifiers
65 imports []*ast.ImportSpec // list of imports
67 // Label scopes
68 // (maintained by open/close LabelScope)
69 labelScope *ast.Scope // label scope for current function
70 targetStack [][]*ast.Ident // stack of unresolved labels
73 func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
74 p.file = fset.AddFile(filename, -1, len(src))
75 var m scanner.Mode
76 if mode&ParseComments != 0 {
77 m = scanner.ScanComments
79 eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
80 p.scanner.Init(p.file, src, eh, m)
82 p.mode = mode
83 p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
85 p.next()
88 // ----------------------------------------------------------------------------
89 // Scoping support
91 func (p *parser) openScope() {
92 p.topScope = ast.NewScope(p.topScope)
95 func (p *parser) closeScope() {
96 p.topScope = p.topScope.Outer
99 func (p *parser) openLabelScope() {
100 p.labelScope = ast.NewScope(p.labelScope)
101 p.targetStack = append(p.targetStack, nil)
104 func (p *parser) closeLabelScope() {
105 // resolve labels
106 n := len(p.targetStack) - 1
107 scope := p.labelScope
108 for _, ident := range p.targetStack[n] {
109 ident.Obj = scope.Lookup(ident.Name)
110 if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
111 p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
114 // pop label scope
115 p.targetStack = p.targetStack[0:n]
116 p.labelScope = p.labelScope.Outer
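// Editorial note, not part of the original source: targetStack collects branch
// targets (goto/break/continue labels) that are resolved only when the label
// scope is closed above, so forward references work and genuinely undefined
// labels are reported when the DeclarationErrors mode is set. For illustration:
//
//	func f() {
//		goto L2 // error: "label L2 undefined" (with parser.DeclarationErrors)
//		goto L1 // ok: resolved when closeLabelScope runs
//	L1:
//		return
//	}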
119 func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
120 for _, ident := range idents {
121 assert(ident.Obj == nil, "identifier already declared or resolved")
122 obj := ast.NewObj(kind, ident.Name)
123 // remember the corresponding declaration for redeclaration
124 // errors and global variable resolution/typechecking phase
125 obj.Decl = decl
126 obj.Data = data
127 ident.Obj = obj
128 if ident.Name != "_" {
129 if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
130 prevDecl := ""
131 if pos := alt.Pos(); pos.IsValid() {
132 prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
134 p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
140 func (p *parser) shortVarDecl(decl *ast.AssignStmt, list []ast.Expr) {
141 // Go spec: A short variable declaration may redeclare variables
142 // provided they were originally declared in the same block with
143 // the same type, and at least one of the non-blank variables is new.
144 n := 0 // number of new variables
145 for _, x := range list {
146 if ident, isIdent := x.(*ast.Ident); isIdent {
147 assert(ident.Obj == nil, "identifier already declared or resolved")
148 obj := ast.NewObj(ast.Var, ident.Name)
149 // remember corresponding assignment for other tools
150 obj.Decl = decl
151 ident.Obj = obj
152 if ident.Name != "_" {
153 if alt := p.topScope.Insert(obj); alt != nil {
154 ident.Obj = alt // redeclaration
155 } else {
156 n++ // new declaration
159 } else {
160 p.errorExpected(x.Pos(), "identifier on left side of :=")
163 if n == 0 && p.mode&DeclarationErrors != 0 {
164 p.error(list[0].Pos(), "no new variables on left side of :=")
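// Editorial note, not part of the original source: shortVarDecl implements the
// redeclaration rule quoted above. Within a single block, for illustration:
//
//	a, b := 1, 2 // declares a and b
//	a, c := 3, 4 // ok: c is new, a is merely reassigned (its Obj is set to the old object)
//	a, b := 5, 6 // "no new variables on left side of :=" (reported with DeclarationErrors)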
168 // The unresolved object is a sentinel to mark identifiers that have been added
169 // to the list of unresolved identifiers. The sentinel is only used for verifying
170 // internal consistency.
171 var unresolved = new(ast.Object)
173 // If x is an identifier, tryResolve attempts to resolve x by looking up
174 // the object it denotes. If no object is found and collectUnresolved is
175 // set, x is marked as unresolved and collected in the list of unresolved
176 // identifiers.
178 func (p *parser) tryResolve(x ast.Expr, collectUnresolved bool) {
179 // nothing to do if x is not an identifier or the blank identifier
180 ident, _ := x.(*ast.Ident)
181 if ident == nil {
182 return
184 assert(ident.Obj == nil, "identifier already declared or resolved")
185 if ident.Name == "_" {
186 return
188 // try to resolve the identifier
189 for s := p.topScope; s != nil; s = s.Outer {
190 if obj := s.Lookup(ident.Name); obj != nil {
191 ident.Obj = obj
192 return
195 // all local scopes are known, so any unresolved identifier
196 // must be found either in the file scope, package scope
197 // (perhaps in another file), or universe scope --- collect
198 // them so that they can be resolved later
199 if collectUnresolved {
200 ident.Obj = unresolved
201 p.unresolved = append(p.unresolved, ident)
205 func (p *parser) resolve(x ast.Expr) {
206 p.tryResolve(x, true)
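// Editorial note, not part of the original source: identifiers that cannot be
// resolved in any open local scope are collected here and, if they are not
// declared at package level either, end up in ast.File.Unresolved. A rough
// sketch of inspecting them, assuming fset and src as in the earlier example:
//
//	f, err := parser.ParseFile(fset, "x.go", src, 0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, id := range f.Unresolved {
//		fmt.Println(id.Name) // e.g. "fmt", "int": names from other packages or the universe scope
//	}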
209 // ----------------------------------------------------------------------------
210 // Parsing support
212 func (p *parser) printTrace(a ...interface{}) {
213 const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
214 const n = len(dots)
215 pos := p.file.Position(p.pos)
216 fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
217 i := 2 * p.indent
218 for i > n {
219 fmt.Print(dots)
220 i -= n
222 // i <= n
223 fmt.Print(dots[0:i])
224 fmt.Println(a...)
227 func trace(p *parser, msg string) *parser {
228 p.printTrace(msg, "(")
229 p.indent++
230 return p
233 // Usage pattern: defer un(trace(p, "..."))
234 func un(p *parser) {
235 p.indent--
236 p.printTrace(")")
239 // Advance to the next token.
240 func (p *parser) next0() {
241 // Because of one-token look-ahead, print the previous token
242 // when tracing as it provides a more readable output. The
243 // very first token (!p.pos.IsValid()) is not initialized
244 // (it is token.ILLEGAL), so don't print it.
245 if p.trace && p.pos.IsValid() {
246 s := p.tok.String()
247 switch {
248 case p.tok.IsLiteral():
249 p.printTrace(s, p.lit)
250 case p.tok.IsOperator(), p.tok.IsKeyword():
251 p.printTrace("\"" + s + "\"")
252 default:
253 p.printTrace(s)
257 p.pos, p.tok, p.lit = p.scanner.Scan()
260 // Consume a comment and return it and the line on which it ends.
261 func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
262 // /*-style comments may end on a different line than where they start.
263 // Scan the comment for '\n' chars and adjust endline accordingly.
264 endline = p.file.Line(p.pos)
265 if p.lit[1] == '*' {
266 // don't use range here - no need to decode Unicode code points
267 for i := 0; i < len(p.lit); i++ {
268 if p.lit[i] == '\n' {
269 endline++
274 comment = &ast.Comment{Slash: p.pos, Text: p.lit}
275 p.next0()
277 return
280 // Consume a group of adjacent comments, add it to the parser's
281 // comments list, and return it together with the line at which
282 // the last comment in the group ends. A non-comment token or n
283 // empty lines terminate a comment group.
285 func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
286 var list []*ast.Comment
287 endline = p.file.Line(p.pos)
288 for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
289 var comment *ast.Comment
290 comment, endline = p.consumeComment()
291 list = append(list, comment)
294 // add comment group to the comments list
295 comments = &ast.CommentGroup{List: list}
296 p.comments = append(p.comments, comments)
298 return
301 // Advance to the next non-comment token. In the process, collect
302 // any comment groups encountered, and remember the last lead and
303 // line comments.
305 // A lead comment is a comment group that starts and ends in a
306 // line without any other tokens and that is followed by a non-comment
307 // token on the line immediately after the comment group.
309 // A line comment is a comment group that follows a non-comment
310 // token on the same line, and that has no tokens after it on the line
311 // where it ends.
313 // Lead and line comments may be considered documentation that is
314 // stored in the AST.
316 func (p *parser) next() {
317 p.leadComment = nil
318 p.lineComment = nil
319 prev := p.pos
320 p.next0()
322 if p.tok == token.COMMENT {
323 var comment *ast.CommentGroup
324 var endline int
326 if p.file.Line(p.pos) == p.file.Line(prev) {
327 // The comment is on same line as the previous token; it
328 // cannot be a lead comment but may be a line comment.
329 comment, endline = p.consumeCommentGroup(0)
330 if p.file.Line(p.pos) != endline || p.tok == token.EOF {
331 // The next token is on a different line, thus
332 // the last comment group is a line comment.
333 p.lineComment = comment
337 // consume successor comments, if any
338 endline = -1
339 for p.tok == token.COMMENT {
340 comment, endline = p.consumeCommentGroup(1)
343 if endline+1 == p.file.Line(p.pos) {
344 // The next token is following on the line immediately after the
345 // comment group, thus the last comment group is a lead comment.
346 p.leadComment = comment
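// Editorial note, not part of the original source: with the ParseComments mode
// set, the two comment kinds tracked above land in different AST fields. In a
// struct declaration, for illustration:
//
//	type T struct {
//		// Name is the user-visible name.  <- lead comment: becomes Field.Doc
//		Name string // must be non-empty   <- line comment: becomes Field.Comment
//	}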
351 // A bailout panic is raised to indicate early termination.
352 type bailout struct{}
354 func (p *parser) error(pos token.Pos, msg string) {
355 epos := p.file.Position(pos)
357 // If AllErrors is not set, discard errors reported on the same line
358 // as the last recorded error and stop parsing if there are more than
359 // 10 errors.
360 if p.mode&AllErrors == 0 {
361 n := len(p.errors)
362 if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
363 return // discard - likely a spurious error
365 if n > 10 {
366 panic(bailout{})
370 p.errors.Add(epos, msg)
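// Editorial note, not part of the original source: the bailout panic is never
// visible to clients; the exported entry points recover it and return the
// errors collected so far. A sketch of that pattern (interface.go uses
// something along these lines):
//
//	defer func() {
//		if e := recover(); e != nil {
//			if _, ok := e.(bailout); !ok {
//				panic(e) // re-panic: not a parser bailout
//			}
//		}
//		// ... report p.errors to the caller
//	}()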
373 func (p *parser) errorExpected(pos token.Pos, msg string) {
374 msg = "expected " + msg
375 if pos == p.pos {
376 // the error happened at the current position;
377 // make the error message more specific
378 switch {
379 case p.tok == token.SEMICOLON && p.lit == "\n":
380 msg += ", found newline"
381 case p.tok.IsLiteral():
382 // print 123 rather than 'INT', etc.
383 msg += ", found " + p.lit
384 default:
385 msg += ", found '" + p.tok.String() + "'"
388 p.error(pos, msg)
391 func (p *parser) expect(tok token.Token) token.Pos {
392 pos := p.pos
393 if p.tok != tok {
394 p.errorExpected(pos, "'"+tok.String()+"'")
396 p.next() // make progress
397 return pos
400 // expectClosing is like expect but provides a better error message
401 // for the common case of a missing comma before a newline.
403 func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
404 if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
405 p.error(p.pos, "missing ',' before newline in "+context)
406 p.next()
408 return p.expect(tok)
411 func (p *parser) expectSemi() {
412 // semicolon is optional before a closing ')' or '}'
413 if p.tok != token.RPAREN && p.tok != token.RBRACE {
414 switch p.tok {
415 case token.COMMA:
416 // permit a ',' instead of a ';' but complain
417 p.errorExpected(p.pos, "';'")
418 fallthrough
419 case token.SEMICOLON:
420 p.next()
421 default:
422 p.errorExpected(p.pos, "';'")
423 p.advance(stmtStart)
428 func (p *parser) atComma(context string, follow token.Token) bool {
429 if p.tok == token.COMMA {
430 return true
432 if p.tok != follow {
433 msg := "missing ','"
434 if p.tok == token.SEMICOLON && p.lit == "\n" {
435 msg += " before newline"
437 p.error(p.pos, msg+" in "+context)
438 return true // "insert" comma and continue
440 return false
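// Editorial note, not part of the original source: atComma and expectClosing
// exist mainly for diagnostics like the one below, where a newline stands in
// for a forgotten comma; the parser reports the problem, "inserts" the comma,
// and keeps going:
//
//	xs := []int{
//		1,
//		2 // error: "missing ',' before newline in composite literal"
//	}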
443 func assert(cond bool, msg string) {
444 if !cond {
445 panic("go/parser internal error: " + msg)
449 // advance consumes tokens until the current token p.tok
450 // is in the 'to' set, or token.EOF. For error recovery.
451 func (p *parser) advance(to map[token.Token]bool) {
452 for ; p.tok != token.EOF; p.next() {
453 if to[p.tok] {
454 // Return only if parser made some progress since last
455 // sync or if it has not reached 10 advance calls without
456 // progress. Otherwise consume at least one token to
457 // avoid an endless parser loop (it is possible that
458 // both parseOperand and parseStmt call advance and
459 // correctly do not advance, thus the need for the
460 // invocation limit p.syncCnt).
461 if p.pos == p.syncPos && p.syncCnt < 10 {
462 p.syncCnt++
463 return
465 if p.pos > p.syncPos {
466 p.syncPos = p.pos
467 p.syncCnt = 0
468 return
470 // Reaching here indicates a parser bug, likely an
471 // incorrect token list in this function, but it only
472 // leads to skipping of possibly correct code if a
473 // previous error is present, and thus is preferred
474 // over a non-terminating parse.
479 var stmtStart = map[token.Token]bool{
480 token.BREAK: true,
481 token.CONST: true,
482 token.CONTINUE: true,
483 token.DEFER: true,
484 token.FALLTHROUGH: true,
485 token.FOR: true,
486 token.GO: true,
487 token.GOTO: true,
488 token.IF: true,
489 token.RETURN: true,
490 token.SELECT: true,
491 token.SWITCH: true,
492 token.TYPE: true,
493 token.VAR: true,
496 var declStart = map[token.Token]bool{
497 token.CONST: true,
498 token.TYPE: true,
499 token.VAR: true,
502 var exprEnd = map[token.Token]bool{
503 token.COMMA: true,
504 token.COLON: true,
505 token.SEMICOLON: true,
506 token.RPAREN: true,
507 token.RBRACK: true,
508 token.RBRACE: true,
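// Editorial note, not part of the original source: advance and the token sets
// above only control recovery inside this file; callers observe the collected
// diagnostics as a scanner.ErrorList returned by the exported functions. A
// rough sketch, assuming go/parser, go/scanner, go/token, and fmt are imported:
//
//	src := []byte("package p\n\nfunc f() {\n\tif\n}\n")
//	fset := token.NewFileSet()
//	_, err := parser.ParseFile(fset, "broken.go", src, 0)
//	if list, ok := err.(scanner.ErrorList); ok {
//		for _, e := range list {
//			fmt.Println(e) // position and message of each syntax error
//		}
//	}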
511 // safePos returns a valid file position for a given position: If pos
512 // is valid to begin with, safePos returns pos. If pos is out-of-range,
513 // safePos returns the EOF position.
515 // This is a hack to work around "artificial" end positions in the AST which
516 // are computed by adding 1 to (presumably valid) token positions. If the
517 // token positions are invalid due to parse errors, the resulting end position
518 // may be past the file's EOF position, which would lead to panics if used
519 // later on.
521 func (p *parser) safePos(pos token.Pos) (res token.Pos) {
522 defer func() {
523 if recover() != nil {
524 res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
527 _ = p.file.Offset(pos) // trigger a panic if position is out-of-range
528 return pos
531 // ----------------------------------------------------------------------------
532 // Identifiers
534 func (p *parser) parseIdent() *ast.Ident {
535 pos := p.pos
536 name := "_"
537 if p.tok == token.IDENT {
538 name = p.lit
539 p.next()
540 } else {
541 p.expect(token.IDENT) // use expect() error handling
543 return &ast.Ident{NamePos: pos, Name: name}
546 func (p *parser) parseIdentList() (list []*ast.Ident) {
547 if p.trace {
548 defer un(trace(p, "IdentList"))
551 list = append(list, p.parseIdent())
552 for p.tok == token.COMMA {
553 p.next()
554 list = append(list, p.parseIdent())
557 return
560 // ----------------------------------------------------------------------------
561 // Common productions
563 // If lhs is set, result list elements which are identifiers are not resolved.
564 func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
565 if p.trace {
566 defer un(trace(p, "ExpressionList"))
569 list = append(list, p.checkExpr(p.parseExpr(lhs)))
570 for p.tok == token.COMMA {
571 p.next()
572 list = append(list, p.checkExpr(p.parseExpr(lhs)))
575 return
578 func (p *parser) parseLhsList() []ast.Expr {
579 old := p.inRhs
580 p.inRhs = false
581 list := p.parseExprList(true)
582 switch p.tok {
583 case token.DEFINE:
584 // lhs of a short variable declaration
585 // but doesn't enter scope until later:
586 // caller must call p.shortVarDecl(p.makeIdentList(list))
587 // at appropriate time.
588 case token.COLON:
589 // lhs of a label declaration or a communication clause of a select
590 // statement (parseLhsList is not called when parsing the case clause
591 // of a switch statement):
592 // - labels are declared by the caller of parseLhsList
593 // - for communication clauses, if there is a stand-alone identifier
594 // followed by a colon, we have a syntax error; there is no need
595 // to resolve the identifier in that case
596 default:
597 // identifiers must be declared elsewhere
598 for _, x := range list {
599 p.resolve(x)
602 p.inRhs = old
603 return list
606 func (p *parser) parseRhsList() []ast.Expr {
607 old := p.inRhs
608 p.inRhs = true
609 list := p.parseExprList(false)
610 p.inRhs = old
611 return list
614 // ----------------------------------------------------------------------------
615 // Types
617 func (p *parser) parseType() ast.Expr {
618 if p.trace {
619 defer un(trace(p, "Type"))
622 typ := p.tryType()
624 if typ == nil {
625 pos := p.pos
626 p.errorExpected(pos, "type")
627 p.advance(exprEnd)
628 return &ast.BadExpr{From: pos, To: p.pos}
631 return typ
634 // If the result is an identifier, it is not resolved.
635 func (p *parser) parseTypeName() ast.Expr {
636 if p.trace {
637 defer un(trace(p, "TypeName"))
640 ident := p.parseIdent()
641 // don't resolve ident yet - it may be a parameter or field name
643 if p.tok == token.PERIOD {
644 // ident is a package name
645 p.next()
646 p.resolve(ident)
647 sel := p.parseIdent()
648 return &ast.SelectorExpr{X: ident, Sel: sel}
651 return ident
654 func (p *parser) parseArrayType() ast.Expr {
655 if p.trace {
656 defer un(trace(p, "ArrayType"))
659 lbrack := p.expect(token.LBRACK)
660 p.exprLev++
661 var len ast.Expr
662 // always permit ellipsis for more fault-tolerant parsing
663 if p.tok == token.ELLIPSIS {
664 len = &ast.Ellipsis{Ellipsis: p.pos}
665 p.next()
666 } else if p.tok != token.RBRACK {
667 len = p.parseRhs()
669 p.exprLev--
670 p.expect(token.RBRACK)
671 elt := p.parseType()
673 return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
676 func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
677 idents := make([]*ast.Ident, len(list))
678 for i, x := range list {
679 ident, isIdent := x.(*ast.Ident)
680 if !isIdent {
681 if _, isBad := x.(*ast.BadExpr); !isBad {
682 // only report error if it's a new one
683 p.errorExpected(x.Pos(), "identifier")
685 ident = &ast.Ident{NamePos: x.Pos(), Name: "_"}
687 idents[i] = ident
689 return idents
692 func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
693 if p.trace {
694 defer un(trace(p, "FieldDecl"))
697 doc := p.leadComment
699 // 1st FieldDecl
700 // A type name used as an anonymous field looks like a field identifier.
701 var list []ast.Expr
702 for {
703 list = append(list, p.parseVarType(false))
704 if p.tok != token.COMMA {
705 break
707 p.next()
710 typ := p.tryVarType(false)
712 // analyze case
713 var idents []*ast.Ident
714 if typ != nil {
715 // IdentifierList Type
716 idents = p.makeIdentList(list)
717 } else {
718 // ["*"] TypeName (AnonymousField)
719 typ = list[0] // we always have at least one element
720 if n := len(list); n > 1 {
721 p.errorExpected(p.pos, "type")
722 typ = &ast.BadExpr{From: p.pos, To: p.pos}
723 } else if !isTypeName(deref(typ)) {
724 p.errorExpected(typ.Pos(), "anonymous field")
725 typ = &ast.BadExpr{From: typ.Pos(), To: p.safePos(typ.End())}
729 // Tag
730 var tag *ast.BasicLit
731 if p.tok == token.STRING {
732 tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
733 p.next()
736 p.expectSemi() // call before accessing p.linecomment
738 field := &ast.Field{Doc: doc, Names: idents, Type: typ, Tag: tag, Comment: p.lineComment}
739 p.declare(field, nil, scope, ast.Var, idents...)
740 p.resolve(typ)
742 return field
745 func (p *parser) parseStructType() *ast.StructType {
746 if p.trace {
747 defer un(trace(p, "StructType"))
750 pos := p.expect(token.STRUCT)
751 lbrace := p.expect(token.LBRACE)
752 scope := ast.NewScope(nil) // struct scope
753 var list []*ast.Field
754 for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
755 // a field declaration cannot start with a '(' but we accept
756 // it here for more robust parsing and better error messages
757 // (parseFieldDecl will check and complain if necessary)
758 list = append(list, p.parseFieldDecl(scope))
760 rbrace := p.expect(token.RBRACE)
762 return &ast.StructType{
763 Struct: pos,
764 Fields: &ast.FieldList{
765 Opening: lbrace,
766 List: list,
767 Closing: rbrace,
772 func (p *parser) parsePointerType() *ast.StarExpr {
773 if p.trace {
774 defer un(trace(p, "PointerType"))
777 star := p.expect(token.MUL)
778 base := p.parseType()
780 return &ast.StarExpr{Star: star, X: base}
783 // If the result is an identifier, it is not resolved.
784 func (p *parser) tryVarType(isParam bool) ast.Expr {
785 if isParam && p.tok == token.ELLIPSIS {
786 pos := p.pos
787 p.next()
788 typ := p.tryIdentOrType() // don't use parseType so we can provide better error message
789 if typ != nil {
790 p.resolve(typ)
791 } else {
792 p.error(pos, "'...' parameter is missing type")
793 typ = &ast.BadExpr{From: pos, To: p.pos}
795 return &ast.Ellipsis{Ellipsis: pos, Elt: typ}
797 return p.tryIdentOrType()
800 // If the result is an identifier, it is not resolved.
801 func (p *parser) parseVarType(isParam bool) ast.Expr {
802 typ := p.tryVarType(isParam)
803 if typ == nil {
804 pos := p.pos
805 p.errorExpected(pos, "type")
806 p.next() // make progress
807 typ = &ast.BadExpr{From: pos, To: p.pos}
809 return typ
812 func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
813 if p.trace {
814 defer un(trace(p, "ParameterList"))
817 // 1st ParameterDecl
818 // A list of identifiers looks like a list of type names.
819 var list []ast.Expr
820 for {
821 list = append(list, p.parseVarType(ellipsisOk))
822 if p.tok != token.COMMA {
823 break
825 p.next()
826 if p.tok == token.RPAREN {
827 break
831 // analyze case
832 if typ := p.tryVarType(ellipsisOk); typ != nil {
833 // IdentifierList Type
834 idents := p.makeIdentList(list)
835 field := &ast.Field{Names: idents, Type: typ}
836 params = append(params, field)
837 // Go spec: The scope of an identifier denoting a function
838 // parameter or result variable is the function body.
839 p.declare(field, nil, scope, ast.Var, idents...)
840 p.resolve(typ)
841 if !p.atComma("parameter list", token.RPAREN) {
842 return
844 p.next()
845 for p.tok != token.RPAREN && p.tok != token.EOF {
846 idents := p.parseIdentList()
847 typ := p.parseVarType(ellipsisOk)
848 field := &ast.Field{Names: idents, Type: typ}
849 params = append(params, field)
850 // Go spec: The scope of an identifier denoting a function
851 // parameter or result variable is the function body.
852 p.declare(field, nil, scope, ast.Var, idents...)
853 p.resolve(typ)
854 if !p.atComma("parameter list", token.RPAREN) {
855 break
857 p.next()
859 return
862 // Type { "," Type } (anonymous parameters)
863 params = make([]*ast.Field, len(list))
864 for i, typ := range list {
865 p.resolve(typ)
866 params[i] = &ast.Field{Type: typ}
868 return
871 func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
872 if p.trace {
873 defer un(trace(p, "Parameters"))
876 var params []*ast.Field
877 lparen := p.expect(token.LPAREN)
878 if p.tok != token.RPAREN {
879 params = p.parseParameterList(scope, ellipsisOk)
881 rparen := p.expect(token.RPAREN)
883 return &ast.FieldList{Opening: lparen, List: params, Closing: rparen}
886 func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
887 if p.trace {
888 defer un(trace(p, "Result"))
891 if p.tok == token.LPAREN {
892 return p.parseParameters(scope, false)
895 typ := p.tryType()
896 if typ != nil {
897 list := make([]*ast.Field, 1)
898 list[0] = &ast.Field{Type: typ}
899 return &ast.FieldList{List: list}
902 return nil
905 func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
906 if p.trace {
907 defer un(trace(p, "Signature"))
910 params = p.parseParameters(scope, true)
911 results = p.parseResult(scope)
913 return
916 func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
917 if p.trace {
918 defer un(trace(p, "FuncType"))
921 pos := p.expect(token.FUNC)
922 scope := ast.NewScope(p.topScope) // function scope
923 params, results := p.parseSignature(scope)
925 return &ast.FuncType{Func: pos, Params: params, Results: results}, scope
928 func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
929 if p.trace {
930 defer un(trace(p, "MethodSpec"))
933 doc := p.leadComment
934 var idents []*ast.Ident
935 var typ ast.Expr
936 x := p.parseTypeName()
937 if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
938 // method
939 idents = []*ast.Ident{ident}
940 scope := ast.NewScope(nil) // method scope
941 params, results := p.parseSignature(scope)
942 typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
943 } else {
944 // embedded interface
945 typ = x
946 p.resolve(typ)
948 p.expectSemi() // call before accessing p.linecomment
950 spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}
951 p.declare(spec, nil, scope, ast.Fun, idents...)
953 return spec
956 func (p *parser) parseInterfaceType() *ast.InterfaceType {
957 if p.trace {
958 defer un(trace(p, "InterfaceType"))
961 pos := p.expect(token.INTERFACE)
962 lbrace := p.expect(token.LBRACE)
963 scope := ast.NewScope(nil) // interface scope
964 var list []*ast.Field
965 for p.tok == token.IDENT {
966 list = append(list, p.parseMethodSpec(scope))
968 rbrace := p.expect(token.RBRACE)
970 return &ast.InterfaceType{
971 Interface: pos,
972 Methods: &ast.FieldList{
973 Opening: lbrace,
974 List: list,
975 Closing: rbrace,
980 func (p *parser) parseMapType() *ast.MapType {
981 if p.trace {
982 defer un(trace(p, "MapType"))
985 pos := p.expect(token.MAP)
986 p.expect(token.LBRACK)
987 key := p.parseType()
988 p.expect(token.RBRACK)
989 value := p.parseType()
991 return &ast.MapType{Map: pos, Key: key, Value: value}
994 func (p *parser) parseChanType() *ast.ChanType {
995 if p.trace {
996 defer un(trace(p, "ChanType"))
999 pos := p.pos
1000 dir := ast.SEND | ast.RECV
1001 var arrow token.Pos
1002 if p.tok == token.CHAN {
1003 p.next()
1004 if p.tok == token.ARROW {
1005 arrow = p.pos
1006 p.next()
1007 dir = ast.SEND
1009 } else {
1010 arrow = p.expect(token.ARROW)
1011 p.expect(token.CHAN)
1012 dir = ast.RECV
1014 value := p.parseType()
1016 return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
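// Editorial note, not part of the original source: the three syntactic forms
// handled above map onto ast.ChanType.Dir as follows:
//
//	chan T    ->  Dir == ast.SEND|ast.RECV
//	chan<- T  ->  Dir == ast.SEND
//	<-chan T  ->  Dir == ast.RECV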
1019 // If the result is an identifier, it is not resolved.
1020 func (p *parser) tryIdentOrType() ast.Expr {
1021 switch p.tok {
1022 case token.IDENT:
1023 return p.parseTypeName()
1024 case token.LBRACK:
1025 return p.parseArrayType()
1026 case token.STRUCT:
1027 return p.parseStructType()
1028 case token.MUL:
1029 return p.parsePointerType()
1030 case token.FUNC:
1031 typ, _ := p.parseFuncType()
1032 return typ
1033 case token.INTERFACE:
1034 return p.parseInterfaceType()
1035 case token.MAP:
1036 return p.parseMapType()
1037 case token.CHAN, token.ARROW:
1038 return p.parseChanType()
1039 case token.LPAREN:
1040 lparen := p.pos
1041 p.next()
1042 typ := p.parseType()
1043 rparen := p.expect(token.RPAREN)
1044 return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
1047 // no type found
1048 return nil
1051 func (p *parser) tryType() ast.Expr {
1052 typ := p.tryIdentOrType()
1053 if typ != nil {
1054 p.resolve(typ)
1056 return typ
1059 // ----------------------------------------------------------------------------
1060 // Blocks
1062 func (p *parser) parseStmtList() (list []ast.Stmt) {
1063 if p.trace {
1064 defer un(trace(p, "StatementList"))
1067 for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
1068 list = append(list, p.parseStmt())
1071 return
1074 func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
1075 if p.trace {
1076 defer un(trace(p, "Body"))
1079 lbrace := p.expect(token.LBRACE)
1080 p.topScope = scope // open function scope
1081 p.openLabelScope()
1082 list := p.parseStmtList()
1083 p.closeLabelScope()
1084 p.closeScope()
1085 rbrace := p.expect(token.RBRACE)
1087 return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1090 func (p *parser) parseBlockStmt() *ast.BlockStmt {
1091 if p.trace {
1092 defer un(trace(p, "BlockStmt"))
1095 lbrace := p.expect(token.LBRACE)
1096 p.openScope()
1097 list := p.parseStmtList()
1098 p.closeScope()
1099 rbrace := p.expect(token.RBRACE)
1101 return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1104 // ----------------------------------------------------------------------------
1105 // Expressions
1107 func (p *parser) parseFuncTypeOrLit() ast.Expr {
1108 if p.trace {
1109 defer un(trace(p, "FuncTypeOrLit"))
1112 typ, scope := p.parseFuncType()
1113 if p.tok != token.LBRACE {
1114 // function type only
1115 return typ
1118 p.exprLev++
1119 body := p.parseBody(scope)
1120 p.exprLev--
1122 return &ast.FuncLit{Type: typ, Body: body}
1125 // parseOperand may return an expression or a raw type (incl. array
1126 // types of the form [...]T). Callers must verify the result.
1127 // If lhs is set and the result is an identifier, it is not resolved.
1129 func (p *parser) parseOperand(lhs bool) ast.Expr {
1130 if p.trace {
1131 defer un(trace(p, "Operand"))
1134 switch p.tok {
1135 case token.IDENT:
1136 x := p.parseIdent()
1137 if !lhs {
1138 p.resolve(x)
1140 return x
1142 case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
1143 x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
1144 p.next()
1145 return x
1147 case token.LPAREN:
1148 lparen := p.pos
1149 p.next()
1150 p.exprLev++
1151 x := p.parseRhsOrType() // types may be parenthesized: (some type)
1152 p.exprLev--
1153 rparen := p.expect(token.RPAREN)
1154 return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
1156 case token.FUNC:
1157 return p.parseFuncTypeOrLit()
1160 if typ := p.tryIdentOrType(); typ != nil {
1161 // could be type for composite literal or conversion
1162 _, isIdent := typ.(*ast.Ident)
1163 assert(!isIdent, "type cannot be identifier")
1164 return typ
1167 // we have an error
1168 pos := p.pos
1169 p.errorExpected(pos, "operand")
1170 p.advance(stmtStart)
1171 return &ast.BadExpr{From: pos, To: p.pos}
1174 func (p *parser) parseSelector(x ast.Expr) ast.Expr {
1175 if p.trace {
1176 defer un(trace(p, "Selector"))
1179 sel := p.parseIdent()
1181 return &ast.SelectorExpr{X: x, Sel: sel}
1184 func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
1185 if p.trace {
1186 defer un(trace(p, "TypeAssertion"))
1189 lparen := p.expect(token.LPAREN)
1190 var typ ast.Expr
1191 if p.tok == token.TYPE {
1192 // type switch: typ == nil
1193 p.next()
1194 } else {
1195 typ = p.parseType()
1197 rparen := p.expect(token.RPAREN)
1199 return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
1202 func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
1203 if p.trace {
1204 defer un(trace(p, "IndexOrSlice"))
1207 const N = 3 // change the 3 to 2 to disable 3-index slices
1208 lbrack := p.expect(token.LBRACK)
1209 p.exprLev++
1210 var index [N]ast.Expr
1211 var colons [N - 1]token.Pos
1212 if p.tok != token.COLON {
1213 index[0] = p.parseRhs()
1215 ncolons := 0
1216 for p.tok == token.COLON && ncolons < len(colons) {
1217 colons[ncolons] = p.pos
1218 ncolons++
1219 p.next()
1220 if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
1221 index[ncolons] = p.parseRhs()
1224 p.exprLev--
1225 rbrack := p.expect(token.RBRACK)
1227 if ncolons > 0 {
1228 // slice expression
1229 slice3 := false
1230 if ncolons == 2 {
1231 slice3 = true
1232 // Check presence of 2nd and 3rd index here rather than during type-checking
1233 // to prevent erroneous programs from passing through gofmt (was issue 7305).
1234 if index[1] == nil {
1235 p.error(colons[0], "2nd index required in 3-index slice")
1236 index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
1238 if index[2] == nil {
1239 p.error(colons[1], "3rd index required in 3-index slice")
1240 index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
1243 return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
1246 return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
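// Editorial note, not part of the original source: the ncolons count decides
// between indexing and slicing, and the two checks above guarantee a
// well-formed ast.SliceExpr even for incomplete 3-index slices. For
// illustration:
//
//	a[i]       -> ast.IndexExpr
//	a[lo:hi]   -> ast.SliceExpr with Slice3 == false
//	a[lo:hi:m] -> ast.SliceExpr with Slice3 == true
//	a[lo::m]   -> error "2nd index required in 3-index slice"; High becomes a BadExpr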
1249 func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
1250 if p.trace {
1251 defer un(trace(p, "CallOrConversion"))
1254 lparen := p.expect(token.LPAREN)
1255 p.exprLev++
1256 var list []ast.Expr
1257 var ellipsis token.Pos
1258 for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
1259 list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
1260 if p.tok == token.ELLIPSIS {
1261 ellipsis = p.pos
1262 p.next()
1264 if !p.atComma("argument list", token.RPAREN) {
1265 break
1267 p.next()
1269 p.exprLev--
1270 rparen := p.expectClosing(token.RPAREN, "argument list")
1272 return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
1275 func (p *parser) parseValue(keyOk bool) ast.Expr {
1276 if p.trace {
1277 defer un(trace(p, "Element"))
1280 if p.tok == token.LBRACE {
1281 return p.parseLiteralValue(nil)
1284 // Because the parser doesn't know the composite literal type, it cannot
1285 // know if a key that's an identifier is a struct field name or a name
1286 // denoting a value. The former is not resolved by the parser or the
1287 // resolver.
1289 // Instead, _try_ to resolve such a key if possible. If it resolves,
1290 // it either a) resolved correctly, or b) resolved incorrectly because
1291 // the key is a struct field with a name matching another identifier.
1292 // In the former case we are done, and in the latter case we don't
1293 // care because the type checker will do a separate field lookup.
1295 // If the key does not resolve, it a) must be defined at the top
1296 // level in another file of the same package or in the universe scope, or be
1297 // undeclared; or b) it is a struct field. In the former case, the type
1298 // checker can do a top-level lookup, and in the latter case it will do
1299 // a separate field lookup.
1300 x := p.checkExpr(p.parseExpr(keyOk))
1301 if keyOk {
1302 if p.tok == token.COLON {
1303 // Try to resolve the key but don't collect it
1304 // as unresolved identifier if it fails so that
1305 // we don't get (possibly false) errors about
1306 // undeclared names.
1307 p.tryResolve(x, false)
1308 } else {
1309 // not a key
1310 p.resolve(x)
1314 return x
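// Editorial note, not part of the original source: concretely, in a literal
// element the value is resolved as usual while the key is only tentatively
// resolved (tryResolve with collectUnresolved == false), so a key that is
// really a struct field name never triggers a spurious undeclared-name error;
// the type checker later decides what the key actually denotes:
//
//	T{Key: val} // val resolved normally; Key only tentatively resolved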
1317 func (p *parser) parseElement() ast.Expr {
1318 if p.trace {
1319 defer un(trace(p, "Element"))
1322 x := p.parseValue(true)
1323 if p.tok == token.COLON {
1324 colon := p.pos
1325 p.next()
1326 x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue(false)}
1329 return x
1332 func (p *parser) parseElementList() (list []ast.Expr) {
1333 if p.trace {
1334 defer un(trace(p, "ElementList"))
1337 for p.tok != token.RBRACE && p.tok != token.EOF {
1338 list = append(list, p.parseElement())
1339 if !p.atComma("composite literal", token.RBRACE) {
1340 break
1342 p.next()
1345 return
1348 func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
1349 if p.trace {
1350 defer un(trace(p, "LiteralValue"))
1353 lbrace := p.expect(token.LBRACE)
1354 var elts []ast.Expr
1355 p.exprLev++
1356 if p.tok != token.RBRACE {
1357 elts = p.parseElementList()
1359 p.exprLev--
1360 rbrace := p.expectClosing(token.RBRACE, "composite literal")
1361 return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
1364 // checkExpr checks that x is an expression (and not a type).
1365 func (p *parser) checkExpr(x ast.Expr) ast.Expr {
1366 switch unparen(x).(type) {
1367 case *ast.BadExpr:
1368 case *ast.Ident:
1369 case *ast.BasicLit:
1370 case *ast.FuncLit:
1371 case *ast.CompositeLit:
1372 case *ast.ParenExpr:
1373 panic("unreachable")
1374 case *ast.SelectorExpr:
1375 case *ast.IndexExpr:
1376 case *ast.SliceExpr:
1377 case *ast.TypeAssertExpr:
1378 // If t.Type == nil we have a type assertion of the form
1379 // y.(type), which is only allowed in type switch expressions.
1380 // It's hard to exclude those but for the case where we are in
1381 // a type switch. Instead be lenient and test this in the type
1382 // checker.
1383 case *ast.CallExpr:
1384 case *ast.StarExpr:
1385 case *ast.UnaryExpr:
1386 case *ast.BinaryExpr:
1387 default:
1388 // all other nodes are not proper expressions
1389 p.errorExpected(x.Pos(), "expression")
1390 x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
1392 return x
1395 // isTypeName reports whether x is a (qualified) TypeName.
1396 func isTypeName(x ast.Expr) bool {
1397 switch t := x.(type) {
1398 case *ast.BadExpr:
1399 case *ast.Ident:
1400 case *ast.SelectorExpr:
1401 _, isIdent := t.X.(*ast.Ident)
1402 return isIdent
1403 default:
1404 return false // all other nodes are not type names
1406 return true
1409 // isLiteralType reports whether x is a legal composite literal type.
1410 func isLiteralType(x ast.Expr) bool {
1411 switch t := x.(type) {
1412 case *ast.BadExpr:
1413 case *ast.Ident:
1414 case *ast.SelectorExpr:
1415 _, isIdent := t.X.(*ast.Ident)
1416 return isIdent
1417 case *ast.ArrayType:
1418 case *ast.StructType:
1419 case *ast.MapType:
1420 default:
1421 return false // all other nodes are not legal composite literal types
1423 return true
1426 // If x is of the form *T, deref returns T, otherwise it returns x.
1427 func deref(x ast.Expr) ast.Expr {
1428 if p, isPtr := x.(*ast.StarExpr); isPtr {
1429 x = p.X
1431 return x
1434 // If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
1435 func unparen(x ast.Expr) ast.Expr {
1436 if p, isParen := x.(*ast.ParenExpr); isParen {
1437 x = unparen(p.X)
1439 return x
1442 // checkExprOrType checks that x is an expression or a type
1443 // (and not a raw type such as [...]T).
1445 func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
1446 switch t := unparen(x).(type) {
1447 case *ast.ParenExpr:
1448 panic("unreachable")
1449 case *ast.UnaryExpr:
1450 case *ast.ArrayType:
1451 if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
1452 p.error(len.Pos(), "expected array length, found '...'")
1453 x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
1457 // all other nodes are expressions or types
1458 return x
1461 // If lhs is set and the result is an identifier, it is not resolved.
1462 func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
1463 if p.trace {
1464 defer un(trace(p, "PrimaryExpr"))
1467 x := p.parseOperand(lhs)
1468 L:
1469 for {
1470 switch p.tok {
1471 case token.PERIOD:
1472 p.next()
1473 if lhs {
1474 p.resolve(x)
1476 switch p.tok {
1477 case token.IDENT:
1478 x = p.parseSelector(p.checkExprOrType(x))
1479 case token.LPAREN:
1480 x = p.parseTypeAssertion(p.checkExpr(x))
1481 default:
1482 pos := p.pos
1483 p.errorExpected(pos, "selector or type assertion")
1484 p.next() // make progress
1485 sel := &ast.Ident{NamePos: pos, Name: "_"}
1486 x = &ast.SelectorExpr{X: x, Sel: sel}
1488 case token.LBRACK:
1489 if lhs {
1490 p.resolve(x)
1492 x = p.parseIndexOrSlice(p.checkExpr(x))
1493 case token.LPAREN:
1494 if lhs {
1495 p.resolve(x)
1497 x = p.parseCallOrConversion(p.checkExprOrType(x))
1498 case token.LBRACE:
1499 if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
1500 if lhs {
1501 p.resolve(x)
1503 x = p.parseLiteralValue(x)
1504 } else {
1505 break L
1507 default:
1508 break L
1510 lhs = false // no need to try to resolve again
1513 return x
1516 // If lhs is set and the result is an identifier, it is not resolved.
1517 func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
1518 if p.trace {
1519 defer un(trace(p, "UnaryExpr"))
1522 switch p.tok {
1523 case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
1524 pos, op := p.pos, p.tok
1525 p.next()
1526 x := p.parseUnaryExpr(false)
1527 return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}
1529 case token.ARROW:
1530 // channel type or receive expression
1531 arrow := p.pos
1532 p.next()
1534 // If the next token is token.CHAN we still don't know if it
1535 // is a channel type or a receive operation - we only know
1536 // once we have found the end of the unary expression. There
1537 // are two cases:
1539 // <- type => (<-type) must be channel type
1540 // <- expr => <-(expr) is a receive from an expression
1542 // In the first case, the arrow must be re-associated with
1543 // the channel type parsed already:
1545 // <- (chan type) => (<-chan type)
1546 // <- (chan<- type) => (<-chan (<-type))
1548 x := p.parseUnaryExpr(false)
1550 // determine which case we have
1551 if typ, ok := x.(*ast.ChanType); ok {
1552 // (<-type)
1554 // re-associate position info and <-
1555 dir := ast.SEND
1556 for ok && dir == ast.SEND {
1557 if typ.Dir == ast.RECV {
1558 // error: (<-type) is (<-(<-chan T))
1559 p.errorExpected(typ.Arrow, "'chan'")
1561 arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
1562 dir, typ.Dir = typ.Dir, ast.RECV
1563 typ, ok = typ.Value.(*ast.ChanType)
1565 if dir == ast.SEND {
1566 p.errorExpected(arrow, "channel type")
1569 return x
1572 // <-(expr)
1573 return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}
1575 case token.MUL:
1576 // pointer type or unary "*" expression
1577 pos := p.pos
1578 p.next()
1579 x := p.parseUnaryExpr(false)
1580 return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
1583 return p.parsePrimaryExpr(lhs)
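// Editorial note, not part of the original source: the arrow re-association
// described above yields, for illustration:
//
//	<-c                // receive expression: UnaryExpr{Op: token.ARROW, X: c}
//	<-chan int         // channel type: ChanType{Dir: ast.RECV, Value: int}
//	<-chan<- chan int  // re-associated to <-chan (<-chan int), per the comment above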
1586 func (p *parser) tokPrec() (token.Token, int) {
1587 tok := p.tok
1588 if p.inRhs && tok == token.ASSIGN {
1589 tok = token.EQL
1591 return tok, tok.Precedence()
1594 // If lhs is set and the result is an identifier, it is not resolved.
1595 func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
1596 if p.trace {
1597 defer un(trace(p, "BinaryExpr"))
1600 x := p.parseUnaryExpr(lhs)
1601 for {
1602 op, oprec := p.tokPrec()
1603 if oprec < prec1 {
1604 return x
1606 pos := p.expect(op)
1607 if lhs {
1608 p.resolve(x)
1609 lhs = false
1611 y := p.parseBinaryExpr(false, oprec+1)
1612 x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
1616 // If lhs is set and the result is an identifier, it is not resolved.
1617 // The result may be a type or even a raw type ([...]int). Callers must
1618 // check the result (using checkExpr or checkExprOrType), depending on
1619 // context.
1620 func (p *parser) parseExpr(lhs bool) ast.Expr {
1621 if p.trace {
1622 defer un(trace(p, "Expression"))
1625 return p.parseBinaryExpr(lhs, token.LowestPrec+1)
1628 func (p *parser) parseRhs() ast.Expr {
1629 old := p.inRhs
1630 p.inRhs = true
1631 x := p.checkExpr(p.parseExpr(false))
1632 p.inRhs = old
1633 return x
1636 func (p *parser) parseRhsOrType() ast.Expr {
1637 old := p.inRhs
1638 p.inRhs = true
1639 x := p.checkExprOrType(p.parseExpr(false))
1640 p.inRhs = old
1641 return x
1644 // ----------------------------------------------------------------------------
1645 // Statements
1647 // Parsing modes for parseSimpleStmt.
1648 const (
1649 basic = iota
1650 labelOk
1651 rangeOk
1654 // parseSimpleStmt returns true as 2nd result if it parsed the assignment
1655 // of a range clause (with mode == rangeOk). The returned statement is an
1656 // assignment with a right-hand side that is a single unary expression of
1657 // the form "range x". No guarantees are given for the left-hand side.
1658 func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
1659 if p.trace {
1660 defer un(trace(p, "SimpleStmt"))
1663 x := p.parseLhsList()
1665 switch p.tok {
1666 case
1667 token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
1668 token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
1669 token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
1670 token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
1671 // assignment statement, possibly part of a range clause
1672 pos, tok := p.pos, p.tok
1673 p.next()
1674 var y []ast.Expr
1675 isRange := false
1676 if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
1677 pos := p.pos
1678 p.next()
1679 y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
1680 isRange = true
1681 } else {
1682 y = p.parseRhsList()
1684 as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
1685 if tok == token.DEFINE {
1686 p.shortVarDecl(as, x)
1688 return as, isRange
1691 if len(x) > 1 {
1692 p.errorExpected(x[0].Pos(), "1 expression")
1693 // continue with first expression
1696 switch p.tok {
1697 case token.COLON:
1698 // labeled statement
1699 colon := p.pos
1700 p.next()
1701 if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
1702 // Go spec: The scope of a label is the body of the function
1703 // in which it is declared and excludes the body of any nested
1704 // function.
1705 stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
1706 p.declare(stmt, nil, p.labelScope, ast.Lbl, label)
1707 return stmt, false
1709 // The label declaration typically starts at x[0].Pos(), but the label
1710 // declaration may be erroneous due to a token after that position (and
1711 // before the ':'). If SpuriousErrors is not set, the (only) error
1712 // reported for the line is the illegal label error instead of the error
1713 // for the token before the ':' that caused the problem. Thus, use the (latest) colon
1714 // position for error reporting.
1715 p.error(colon, "illegal label declaration")
1716 return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false
1718 case token.ARROW:
1719 // send statement
1720 arrow := p.pos
1721 p.next()
1722 y := p.parseRhs()
1723 return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false
1725 case token.INC, token.DEC:
1726 // increment or decrement
1727 s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
1728 p.next()
1729 return s, false
1732 // expression
1733 return &ast.ExprStmt{X: x[0]}, false
1736 func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
1737 x := p.parseRhsOrType() // could be a conversion: (some type)(x)
1738 if call, isCall := x.(*ast.CallExpr); isCall {
1739 return call
1741 if _, isBad := x.(*ast.BadExpr); !isBad {
1742 // only report error if it's a new one
1743 p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
1745 return nil
1748 func (p *parser) parseGoStmt() ast.Stmt {
1749 if p.trace {
1750 defer un(trace(p, "GoStmt"))
1753 pos := p.expect(token.GO)
1754 call := p.parseCallExpr("go")
1755 p.expectSemi()
1756 if call == nil {
1757 return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
1760 return &ast.GoStmt{Go: pos, Call: call}
1763 func (p *parser) parseDeferStmt() ast.Stmt {
1764 if p.trace {
1765 defer un(trace(p, "DeferStmt"))
1768 pos := p.expect(token.DEFER)
1769 call := p.parseCallExpr("defer")
1770 p.expectSemi()
1771 if call == nil {
1772 return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
1775 return &ast.DeferStmt{Defer: pos, Call: call}
1778 func (p *parser) parseReturnStmt() *ast.ReturnStmt {
1779 if p.trace {
1780 defer un(trace(p, "ReturnStmt"))
1783 pos := p.pos
1784 p.expect(token.RETURN)
1785 var x []ast.Expr
1786 if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
1787 x = p.parseRhsList()
1789 p.expectSemi()
1791 return &ast.ReturnStmt{Return: pos, Results: x}
1794 func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
1795 if p.trace {
1796 defer un(trace(p, "BranchStmt"))
1799 pos := p.expect(tok)
1800 var label *ast.Ident
1801 if tok != token.FALLTHROUGH && p.tok == token.IDENT {
1802 label = p.parseIdent()
1803 // add to list of unresolved targets
1804 n := len(p.targetStack) - 1
1805 p.targetStack[n] = append(p.targetStack[n], label)
1807 p.expectSemi()
1809 return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
1812 func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
1813 if s == nil {
1814 return nil
1816 if es, isExpr := s.(*ast.ExprStmt); isExpr {
1817 return p.checkExpr(es.X)
1819 found := "simple statement"
1820 if _, isAss := s.(*ast.AssignStmt); isAss {
1821 found = "assignment"
1823 p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
1824 return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
1827 // parseIfHeader is an adjusted version of parser.header
1828 // in cmd/compile/internal/syntax/parser.go, which has
1829 // been tuned for better error handling.
1830 func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
1831 if p.tok == token.LBRACE {
1832 p.error(p.pos, "missing condition in if statement")
1833 cond = &ast.BadExpr{From: p.pos, To: p.pos}
1834 return
1836 // p.tok != token.LBRACE
1838 outer := p.exprLev
1839 p.exprLev = -1
1841 if p.tok != token.SEMICOLON {
1842 // accept potential variable declaration but complain
1843 if p.tok == token.VAR {
1844 p.next()
1845 p.error(p.pos, "var declaration not allowed in 'if' initializer")
1847 init, _ = p.parseSimpleStmt(basic)
1850 var condStmt ast.Stmt
1851 var semi struct {
1852 pos token.Pos
1853 lit string // ";" or "\n"; valid if pos.IsValid()
1855 if p.tok != token.LBRACE {
1856 if p.tok == token.SEMICOLON {
1857 semi.pos = p.pos
1858 semi.lit = p.lit
1859 p.next()
1860 } else {
1861 p.expect(token.SEMICOLON)
1863 if p.tok != token.LBRACE {
1864 condStmt, _ = p.parseSimpleStmt(basic)
1866 } else {
1867 condStmt = init
1868 init = nil
1871 if condStmt != nil {
1872 cond = p.makeExpr(condStmt, "boolean expression")
1873 } else if semi.pos.IsValid() {
1874 if semi.lit == "\n" {
1875 p.error(semi.pos, "unexpected newline, expecting { after if clause")
1876 } else {
1877 p.error(semi.pos, "missing condition in if statement")
1881 // make sure we have a valid AST
1882 if cond == nil {
1883 cond = &ast.BadExpr{From: p.pos, To: p.pos}
1886 p.exprLev = outer
1887 return
1890 func (p *parser) parseIfStmt() *ast.IfStmt {
1891 if p.trace {
1892 defer un(trace(p, "IfStmt"))
1895 pos := p.expect(token.IF)
1896 p.openScope()
1897 defer p.closeScope()
1899 init, cond := p.parseIfHeader()
1900 body := p.parseBlockStmt()
1902 var else_ ast.Stmt
1903 if p.tok == token.ELSE {
1904 p.next()
1905 switch p.tok {
1906 case token.IF:
1907 else_ = p.parseIfStmt()
1908 case token.LBRACE:
1909 else_ = p.parseBlockStmt()
1910 p.expectSemi()
1911 default:
1912 p.errorExpected(p.pos, "if statement or block")
1913 else_ = &ast.BadStmt{From: p.pos, To: p.pos}
1915 } else {
1916 p.expectSemi()
1919 return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
1922 func (p *parser) parseTypeList() (list []ast.Expr) {
1923 if p.trace {
1924 defer un(trace(p, "TypeList"))
1927 list = append(list, p.parseType())
1928 for p.tok == token.COMMA {
1929 p.next()
1930 list = append(list, p.parseType())
1933 return
1936 func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
1937 if p.trace {
1938 defer un(trace(p, "CaseClause"))
1941 pos := p.pos
1942 var list []ast.Expr
1943 if p.tok == token.CASE {
1944 p.next()
1945 if typeSwitch {
1946 list = p.parseTypeList()
1947 } else {
1948 list = p.parseRhsList()
1950 } else {
1951 p.expect(token.DEFAULT)
1954 colon := p.expect(token.COLON)
1955 p.openScope()
1956 body := p.parseStmtList()
1957 p.closeScope()
1959 return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
1962 func isTypeSwitchAssert(x ast.Expr) bool {
1963 a, ok := x.(*ast.TypeAssertExpr)
1964 return ok && a.Type == nil
1967 func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
1968 switch t := s.(type) {
1969 case *ast.ExprStmt:
1970 // x.(type)
1971 return isTypeSwitchAssert(t.X)
1972 case *ast.AssignStmt:
1973 // v := x.(type)
1974 if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
1975 switch t.Tok {
1976 case token.ASSIGN:
1977 // permit v = x.(type) but complain
1978 p.error(t.TokPos, "expected ':=', found '='")
1979 fallthrough
1980 case token.DEFINE:
1981 return true
1985 return false
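// Editorial note, not part of the original source: isTypeSwitchGuard is what
// routes a switch to ast.TypeSwitchStmt instead of ast.SwitchStmt. For
// illustration:
//
//	switch x.(type) {}      // ExprStmt guard: type switch
//	switch v := x.(type) {} // AssignStmt guard with ':=': type switch
//	switch v = x.(type) {}  // accepted, but "expected ':=', found '='" is reported
//	switch x {}             // ordinary expression switch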
1988 func (p *parser) parseSwitchStmt() ast.Stmt {
1989 if p.trace {
1990 defer un(trace(p, "SwitchStmt"))
1993 pos := p.expect(token.SWITCH)
1994 p.openScope()
1995 defer p.closeScope()
1997 var s1, s2 ast.Stmt
1998 if p.tok != token.LBRACE {
1999 prevLev := p.exprLev
2000 p.exprLev = -1
2001 if p.tok != token.SEMICOLON {
2002 s2, _ = p.parseSimpleStmt(basic)
2004 if p.tok == token.SEMICOLON {
2005 p.next()
2006 s1 = s2
2007 s2 = nil
2008 if p.tok != token.LBRACE {
2009 // A TypeSwitchGuard may declare a variable in addition
2010 // to the variable declared in the initial SimpleStmt.
2011 // Introduce extra scope to avoid redeclaration errors:
2013 // switch t := 0; t := x.(T) { ... }
2015 // (this code is not valid Go because the first t
2016 // cannot be accessed and thus is never used; the extra
2017 // scope is needed for the correct error message).
2019 // If we don't have a type switch, s2 must be an expression.
2020 // Having the extra nested but empty scope won't affect it.
2021 p.openScope()
2022 defer p.closeScope()
2023 s2, _ = p.parseSimpleStmt(basic)
2026 p.exprLev = prevLev
2029 typeSwitch := p.isTypeSwitchGuard(s2)
2030 lbrace := p.expect(token.LBRACE)
2031 var list []ast.Stmt
2032 for p.tok == token.CASE || p.tok == token.DEFAULT {
2033 list = append(list, p.parseCaseClause(typeSwitch))
2035 rbrace := p.expect(token.RBRACE)
2036 p.expectSemi()
2037 body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
2039 if typeSwitch {
2040 return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
2043 return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
2046 func (p *parser) parseCommClause() *ast.CommClause {
2047 if p.trace {
2048 defer un(trace(p, "CommClause"))
2051 p.openScope()
2052 pos := p.pos
2053 var comm ast.Stmt
2054 if p.tok == token.CASE {
2055 p.next()
2056 lhs := p.parseLhsList()
2057 if p.tok == token.ARROW {
2058 // SendStmt
2059 if len(lhs) > 1 {
2060 p.errorExpected(lhs[0].Pos(), "1 expression")
2061 // continue with first expression
2063 arrow := p.pos
2064 p.next()
2065 rhs := p.parseRhs()
2066 comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
2067 } else {
2068 // RecvStmt
2069 if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
2070 // RecvStmt with assignment
2071 if len(lhs) > 2 {
2072 p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
2073 // continue with first two expressions
2074 lhs = lhs[0:2]
2076 pos := p.pos
2077 p.next()
2078 rhs := p.parseRhs()
2079 as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
2080 if tok == token.DEFINE {
2081 p.shortVarDecl(as, lhs)
2083 comm = as
2084 } else {
2085 // lhs must be single receive operation
2086 if len(lhs) > 1 {
2087 p.errorExpected(lhs[0].Pos(), "1 expression")
2088 // continue with first expression
2090 comm = &ast.ExprStmt{X: lhs[0]}
2093 } else {
2094 p.expect(token.DEFAULT)
2097 colon := p.expect(token.COLON)
2098 body := p.parseStmtList()
2099 p.closeScope()
2101 return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
2104 func (p *parser) parseSelectStmt() *ast.SelectStmt {
2105 if p.trace {
2106 defer un(trace(p, "SelectStmt"))
2109 pos := p.expect(token.SELECT)
2110 lbrace := p.expect(token.LBRACE)
2111 var list []ast.Stmt
2112 for p.tok == token.CASE || p.tok == token.DEFAULT {
2113 list = append(list, p.parseCommClause())
2115 rbrace := p.expect(token.RBRACE)
2116 p.expectSemi()
2117 body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
2119 return &ast.SelectStmt{Select: pos, Body: body}
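// Sketch (exported API assumed; input arbitrary): each case of a select becomes an
// *ast.CommClause whose Comm field holds an *ast.SendStmt, a receive statement
// (here an *ast.AssignStmt), or nil for the default clause:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nfunc f(ch chan int) {\n\tselect {\n\tcase ch <- 1:\n\tcase v, ok := <-ch:\n\t\t_, _ = v, ok\n\tdefault:\n\t}\n}\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		ast.Inspect(f, func(n ast.Node) bool {
//			if cc, ok := n.(*ast.CommClause); ok {
//				fmt.Printf("%T\n", cc.Comm) // *ast.SendStmt, *ast.AssignStmt, <nil>
//			}
//			return true
//		})
//	}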
2122 func (p *parser) parseForStmt() ast.Stmt {
2123 if p.trace {
2124 defer un(trace(p, "ForStmt"))
2127 pos := p.expect(token.FOR)
2128 p.openScope()
2129 defer p.closeScope()
2131 var s1, s2, s3 ast.Stmt
2132 var isRange bool
2133 if p.tok != token.LBRACE {
2134 prevLev := p.exprLev
2135 p.exprLev = -1
2136 if p.tok != token.SEMICOLON {
2137 if p.tok == token.RANGE {
2138 // "for range x" (nil lhs in assignment)
2139 pos := p.pos
2140 p.next()
2141 y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
2142 s2 = &ast.AssignStmt{Rhs: y}
2143 isRange = true
2144 } else {
2145 s2, isRange = p.parseSimpleStmt(rangeOk)
2148 if !isRange && p.tok == token.SEMICOLON {
2149 p.next()
2150 s1 = s2
2151 s2 = nil
2152 if p.tok != token.SEMICOLON {
2153 s2, _ = p.parseSimpleStmt(basic)
2155 p.expectSemi()
2156 if p.tok != token.LBRACE {
2157 s3, _ = p.parseSimpleStmt(basic)
2160 p.exprLev = prevLev
2163 body := p.parseBlockStmt()
2164 p.expectSemi()
2166 if isRange {
2167 as := s2.(*ast.AssignStmt)
2168 // check lhs
2169 var key, value ast.Expr
2170 switch len(as.Lhs) {
2171 case 0:
2172 // nothing to do
2173 case 1:
2174 key = as.Lhs[0]
2175 case 2:
2176 key, value = as.Lhs[0], as.Lhs[1]
2177 default:
2178 p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
2179 return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
2181 // parseSimpleStmt returned a right-hand side that
2182 // is a single unary expression of the form "range x"
2183 x := as.Rhs[0].(*ast.UnaryExpr).X
2184 return &ast.RangeStmt{
2185 For: pos,
2186 Key: key,
2187 Value: value,
2188 TokPos: as.TokPos,
2189 Tok: as.Tok,
2190 X: x,
2191 Body: body,
2195 // regular for statement
2196 return &ast.ForStmt{
2197 For: pos,
2198 Init: s1,
2199 Cond: p.makeExpr(s2, "boolean or range expression"),
2200 Post: s3,
2201 Body: body,
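// Sketch (exported API assumed; input arbitrary): three-clause loops become
// *ast.ForStmt, range loops become *ast.RangeStmt, and for a bare "for range x"
// both Key and Value are nil:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nfunc f(m map[string]int) {\n\tfor i := 0; i < 3; i++ {\n\t}\n\tfor k, v := range m {\n\t\t_, _ = k, v\n\t}\n\tfor range m {\n\t}\n}\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		ast.Inspect(f, func(n ast.Node) bool {
//			switch s := n.(type) {
//			case *ast.ForStmt:
//				fmt.Printf("%T\n", s)
//			case *ast.RangeStmt:
//				fmt.Printf("%T key=%v value=%v\n", s, s.Key != nil, s.Value != nil)
//			}
//			return true
//		})
//	}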
2205 func (p *parser) parseStmt() (s ast.Stmt) {
2206 if p.trace {
2207 defer un(trace(p, "Statement"))
2210 switch p.tok {
2211 case token.CONST, token.TYPE, token.VAR:
2212 s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
2213 case
2214 // tokens that may start an expression
2215 token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
2216 token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
2217 token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
2218 s, _ = p.parseSimpleStmt(labelOk)
2219 // because of the required look-ahead, labeled statements are
2220 // parsed by parseSimpleStmt - don't expect a semicolon after
2221 // them
2222 if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
2223 p.expectSemi()
2225 case token.GO:
2226 s = p.parseGoStmt()
2227 case token.DEFER:
2228 s = p.parseDeferStmt()
2229 case token.RETURN:
2230 s = p.parseReturnStmt()
2231 case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
2232 s = p.parseBranchStmt(p.tok)
2233 case token.LBRACE:
2234 s = p.parseBlockStmt()
2235 p.expectSemi()
2236 case token.IF:
2237 s = p.parseIfStmt()
2238 case token.SWITCH:
2239 s = p.parseSwitchStmt()
2240 case token.SELECT:
2241 s = p.parseSelectStmt()
2242 case token.FOR:
2243 s = p.parseForStmt()
2244 case token.SEMICOLON:
2245 // Is it ever possible to have an implicit semicolon
2246 // producing an empty statement in a valid program?
2247 // (handle correctly anyway)
2248 s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
2249 p.next()
2250 case token.RBRACE:
2251 // a semicolon may be omitted before a closing "}"
2252 s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
2253 default:
2254 // no statement found
2255 pos := p.pos
2256 p.errorExpected(pos, "statement")
2257 p.advance(stmtStart)
2258 s = &ast.BadStmt{From: pos, To: p.pos}
2261 return
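// Sketch (exported API assumed; input arbitrary): the dispatch above determines the
// concrete ast.Stmt type of each statement in a parsed body:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nfunc f() {\n\tx := 1\n\tif x > 0 {\n\t}\n\tgo f()\n\tdefer f()\n\treturn\n}\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		for _, stmt := range f.Decls[0].(*ast.FuncDecl).Body.List {
//			fmt.Printf("%T\n", stmt) // AssignStmt, IfStmt, GoStmt, DeferStmt, ReturnStmt
//		}
//	}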
2264 // ----------------------------------------------------------------------------
2265 // Declarations
2267 type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
2269 func isValidImport(lit string) bool {
2270 const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
2271 s, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
2272 for _, r := range s {
2273 if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
2274 return false
2277 return s != ""
2280 func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
2281 if p.trace {
2282 defer un(trace(p, "ImportSpec"))
2285 var ident *ast.Ident
2286 switch p.tok {
2287 case token.PERIOD:
2288 ident = &ast.Ident{NamePos: p.pos, Name: "."}
2289 p.next()
2290 case token.IDENT:
2291 ident = p.parseIdent()
2294 pos := p.pos
2295 var path string
2296 if p.tok == token.STRING {
2297 path = p.lit
2298 if !isValidImport(path) {
2299 p.error(pos, "invalid import path: "+path)
2301 p.next()
2302 } else {
2303 p.expect(token.STRING) // use expect() error handling
2305 p.expectSemi() // call before accessing p.lineComment
2307 // collect imports
2308 spec := &ast.ImportSpec{
2309 Doc: doc,
2310 Name: ident,
2311 Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
2312 Comment: p.lineComment,
2314 p.imports = append(p.imports, spec)
2316 return spec
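// Sketch (exported API assumed; input arbitrary): plain, named, and dot imports fill
// in ImportSpec.Name differently, all specs are collected in File.Imports, and an
// import path rejected by isValidImport is reported without aborting the parse:
//
//	package main
//
//	import (
//		"fmt"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nimport (\n\tfoo \"fmt\"\n\t. \"strings\"\n\t\"a b\"\n)\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err) // should mention an invalid import path
//		}
//		for _, spec := range f.Imports {
//			fmt.Println(spec.Name, spec.Path.Value) // foo "fmt", . "strings", <nil> "a b"
//		}
//	}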
2319 func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
2320 if p.trace {
2321 defer un(trace(p, keyword.String()+"Spec"))
2324 pos := p.pos
2325 idents := p.parseIdentList()
2326 typ := p.tryType()
2327 var values []ast.Expr
2328 // always permit optional initialization for more tolerant parsing
2329 if p.tok == token.ASSIGN {
2330 p.next()
2331 values = p.parseRhsList()
2333 p.expectSemi() // call before accessing p.lineComment
2335 switch keyword {
2336 case token.VAR:
2337 if typ == nil && values == nil {
2338 p.error(pos, "missing variable type or initialization")
2340 case token.CONST:
2341 if values == nil && (iota == 0 || typ != nil) {
2342 p.error(pos, "missing constant value")
2346 // Go spec: The scope of a constant or variable identifier declared inside
2347 // a function begins at the end of the ConstSpec or VarSpec and ends at
2348 // the end of the innermost containing block.
2349 // (Global identifiers are resolved in a separate phase after parsing.)
2350 spec := &ast.ValueSpec{
2351 Doc: doc,
2352 Names: idents,
2353 Type: typ,
2354 Values: values,
2355 Comment: p.lineComment,
2357 kind := ast.Con
2358 if keyword == token.VAR {
2359 kind = ast.Var
2361 p.declare(spec, iota, p.topScope, kind, idents...)
2363 return spec
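// Sketch (exported API assumed; input arbitrary): within a grouped const declaration,
// later specs may omit both type and values once iota > 0 (relying on the spec's
// implicit repetition of the previous expression list), while var specs need a type
// or an initializer:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nconst (\n\tA = iota\n\tB\n\tC\n)\nvar x, y = 1, 2\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		for _, d := range f.Decls {
//			gen := d.(*ast.GenDecl)
//			for _, s := range gen.Specs {
//				vs := s.(*ast.ValueSpec)
//				fmt.Println(gen.Tok, len(vs.Names), "name(s),", len(vs.Values), "value(s)")
//			}
//		}
//	}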
2366 func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
2367 if p.trace {
2368 defer un(trace(p, "TypeSpec"))
2371 ident := p.parseIdent()
2373 // Go spec: The scope of a type identifier declared inside a function begins
2374 // at the identifier in the TypeSpec and ends at the end of the innermost
2375 // containing block.
2376 // (Global identifiers are resolved in a separate phase after parsing.)
2377 spec := &ast.TypeSpec{Doc: doc, Name: ident}
2378 p.declare(spec, nil, p.topScope, ast.Typ, ident)
2379 if p.tok == token.ASSIGN {
2380 spec.Assign = p.pos
2381 p.next()
2383 spec.Type = p.parseType()
2384 p.expectSemi() // call before accessing p.lineComment
2385 spec.Comment = p.lineComment
2387 return spec
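// Sketch (exported API assumed; input arbitrary): an alias declaration sets
// TypeSpec.Assign to the position of '=', so it can be told apart from a type
// definition:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\ntype A = int\ntype B int\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		for _, d := range f.Decls {
//			ts := d.(*ast.GenDecl).Specs[0].(*ast.TypeSpec)
//			fmt.Println(ts.Name.Name, "alias:", ts.Assign.IsValid()) // A alias: true, B alias: false
//		}
//	}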
2390 func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
2391 if p.trace {
2392 defer un(trace(p, "GenDecl("+keyword.String()+")"))
2395 doc := p.leadComment
2396 pos := p.expect(keyword)
2397 var lparen, rparen token.Pos
2398 var list []ast.Spec
2399 if p.tok == token.LPAREN {
2400 lparen = p.pos
2401 p.next()
2402 for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
2403 list = append(list, f(p.leadComment, keyword, iota))
2405 rparen = p.expect(token.RPAREN)
2406 p.expectSemi()
2407 } else {
2408 list = append(list, f(nil, keyword, 0))
2411 return &ast.GenDecl{
2412 Doc: doc,
2413 TokPos: pos,
2414 Tok: keyword,
2415 Lparen: lparen,
2416 Specs: list,
2417 Rparen: rparen,
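// Sketch (exported API assumed; input arbitrary): a parenthesized group and a single
// spec both yield an *ast.GenDecl; Lparen.IsValid() distinguishes the two forms:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nvar (\n\ta int\n\tb string\n)\nvar c bool\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		for _, d := range f.Decls {
//			gen := d.(*ast.GenDecl)
//			fmt.Println(gen.Tok, "grouped:", gen.Lparen.IsValid(), "specs:", len(gen.Specs))
//		}
//	}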
2421 func (p *parser) parseFuncDecl() *ast.FuncDecl {
2422 if p.trace {
2423 defer un(trace(p, "FunctionDecl"))
2426 doc := p.leadComment
2427 pos := p.expect(token.FUNC)
2428 scope := ast.NewScope(p.topScope) // function scope
2430 var recv *ast.FieldList
2431 if p.tok == token.LPAREN {
2432 recv = p.parseParameters(scope, false)
2435 ident := p.parseIdent()
2437 params, results := p.parseSignature(scope)
2439 var body *ast.BlockStmt
2440 if p.tok == token.LBRACE {
2441 body = p.parseBody(scope)
2443 p.expectSemi()
2445 decl := &ast.FuncDecl{
2446 Doc: doc,
2447 Recv: recv,
2448 Name: ident,
2449 Type: &ast.FuncType{
2450 Func: pos,
2451 Params: params,
2452 Results: results,
2454 Body: body,
2456 if recv == nil {
2457 // Go spec: The scope of an identifier denoting a constant, type,
2458 // variable, or function (but not method) declared at top level
2459 // (outside any function) is the package block.
2461 // init() functions cannot be referred to and there may
2462 // be more than one - don't put them in the pkgScope
2463 if ident.Name != "init" {
2464 p.declare(decl, nil, p.pkgScope, ast.Fun, ident)
2468 return decl
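// Sketch (exported API assumed; input arbitrary): methods carry a non-nil Recv field,
// plain functions do not; per the comment above, only non-method, non-init
// declarations are entered into the package scope:
//
//	package main
//
//	import (
//		"fmt"
//		"go/ast"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\ntype T struct{}\nfunc (t *T) M() {}\nfunc F() {}\nfunc init() {}\n"
//		fset := token.NewFileSet()
//		f, err := parser.ParseFile(fset, "x.go", src, 0)
//		if err != nil {
//			fmt.Println(err)
//			return
//		}
//		for _, d := range f.Decls {
//			if fn, ok := d.(*ast.FuncDecl); ok {
//				fmt.Println(fn.Name.Name, "method:", fn.Recv != nil) // M true, F false, init false
//			}
//		}
//	}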
2471 func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
2472 if p.trace {
2473 defer un(trace(p, "Declaration"))
2476 var f parseSpecFunction
2477 switch p.tok {
2478 case token.CONST, token.VAR:
2479 f = p.parseValueSpec
2481 case token.TYPE:
2482 f = p.parseTypeSpec
2484 case token.FUNC:
2485 return p.parseFuncDecl()
2487 default:
2488 pos := p.pos
2489 p.errorExpected(pos, "declaration")
2490 p.advance(sync)
2491 return &ast.BadDecl{From: pos, To: p.pos}
2494 return p.parseGenDecl(p.tok, f)
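// Sketch (exported API assumed; input arbitrary): the Mode flags PackageClauseOnly and
// ImportsOnly make parseFile below stop early, which shows up in the number of
// declarations returned:
//
//	package main
//
//	import (
//		"fmt"
//		"go/parser"
//		"go/token"
//	)
//
//	func main() {
//		src := "package p\nimport \"fmt\"\nfunc F() { fmt.Println() }\n"
//		fset := token.NewFileSet()
//		for _, mode := range []parser.Mode{parser.PackageClauseOnly, parser.ImportsOnly, 0} {
//			f, err := parser.ParseFile(fset, "x.go", src, mode)
//			if err != nil {
//				fmt.Println(err)
//				continue
//			}
//			fmt.Println("decls:", len(f.Decls)) // 0, then 1, then 2
//		}
//	}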
2497 // ----------------------------------------------------------------------------
2498 // Source files
2500 func (p *parser) parseFile() *ast.File {
2501 if p.trace {
2502 defer un(trace(p, "File"))
2505 // Don't bother parsing the rest if we had errors scanning the first token.
2506 // Likely not a Go source file at all.
2507 if p.errors.Len() != 0 {
2508 return nil
2511 // package clause
2512 doc := p.leadComment
2513 pos := p.expect(token.PACKAGE)
2514 // Go spec: The package clause is not a declaration;
2515 // the package name does not appear in any scope.
2516 ident := p.parseIdent()
2517 if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
2518 p.error(p.pos, "invalid package name _")
2520 p.expectSemi()
2522 // Don't bother parsing the rest if we had errors parsing the package clause.
2523 // Likely not a Go source file at all.
2524 if p.errors.Len() != 0 {
2525 return nil
2528 p.openScope()
2529 p.pkgScope = p.topScope
2530 var decls []ast.Decl
2531 if p.mode&PackageClauseOnly == 0 {
2532 // import decls
2533 for p.tok == token.IMPORT {
2534 decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
2537 if p.mode&ImportsOnly == 0 {
2538 // rest of package body
2539 for p.tok != token.EOF {
2540 decls = append(decls, p.parseDecl(declStart))
2544 p.closeScope()
2545 assert(p.topScope == nil, "unbalanced scopes")
2546 assert(p.labelScope == nil, "unbalanced label scopes")
2548 // resolve global identifiers within the same file
2549 i := 0
2550 for _, ident := range p.unresolved {
2551 // i <= index for current ident
2552 assert(ident.Obj == unresolved, "object already resolved")
2553 ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
2554 if ident.Obj == nil {
2555 p.unresolved[i] = ident
2560 return &ast.File{
2561 Doc: doc,
2562 Package: pos,
2563 Name: ident,
2564 Decls: decls,
2565 Scope: p.pkgScope,
2566 Imports: p.imports,
2567 Unresolved: p.unresolved[0:i],
2568 Comments: p.comments,